for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
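    /*
     * A minimal sketch of the loop body, following dequeue_huge_page_vma()
     * in ~3.x mm/hugetlb.c (helper names vary by kernel version); assumes
     * 'h' and 'page' are in scope as in that function. Each candidate zone
     * is checked against the task's cpuset before a free huge page is
     * dequeued from the zone's node.
     */
    if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
        page = dequeue_huge_page_node(h, zone_to_nid(zone));
        if (page)
            break;  /* got a free huge page for this fault */
    }
}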
/*
 * Clear this flag so that x86's huge_pmd_share page_table_shareable
 * test will fail on a vma being torn down, and not grab a page table
 * on its way out. We're lucky that the flag has such an appropriate
 * name, and can in fact be safely cleared here. We could clear it
 * before the __unmap_hugepage_range above, but all that's necessary
 * is to clear it during the time a vma resembles a shared one.
 */
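/* The clear itself, as at the end of __unmap_hugepage_range_final(): */
vma->vm_flags &= ~VM_MAYSHARE;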
/*
 * Take the mapping lock for the duration of the table walk. As
 * this mapping should be shared between all the VMAs,
 * __unmap_hugepage_range() is called as the lock is already held
 */
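/*
 * Taking the lock before the walk: i_mmap_lock_write() in >=3.19
 * kernels; older ones used mutex_lock(&mapping->i_mmap_mutex).
 */
i_mmap_lock_write(mapping);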
vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
    /* Do not unmap the current VMA */
    if (iter_vma == vma)
        continue;
    /*
     * Unmap the page from other VMAs without their own reserves.
     * They get marked to be SIGKILLed if they fault in these areas.
     */
    if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
        unmap_hugepage_range(iter_vma, address,
                     address + huge_page_size(h), page);
}
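/* The release pairs with the i_mmap_lock_write() taken above. */
i_mmap_unlock_write(mapping);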
flush_cache_range(vma, address, end);
mmu_notifier_invalidate_range_start(mm, start, end);
for (; address < end; address += huge_page_size(h)) {
    spinlock_t *ptl;

    ptep = huge_pte_offset(mm, address);
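    /*
     * Sketch of the remainder of the loop body, following
     * hugetlb_change_protection() in ~3.19-4.11 mm/hugetlb.c; assumes
     * 'pte_t pte' and 'unsigned long pages = 0' are declared at function
     * scope, and elides the migration-entry handling.
     */
    if (!ptep)
        continue;
    ptl = huge_pte_lock(h, mm, ptep);
    if (huge_pmd_unshare(mm, &address, ptep)) {
        pages++;            /* unsharing counts as a changed entry */
        spin_unlock(ptl);
        continue;
    }
    if (!huge_pte_none(huge_ptep_get(ptep))) {
        pte = huge_ptep_get_and_clear(mm, address, ptep);
        pte = pte_mkhuge(huge_pte_modify(pte, newprot));
        set_huge_pte_at(mm, address, ptep, pte);
        pages++;
    }
    spin_unlock(ptl);
}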
/*
 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
 * may have cleared our pud entry and done put_page on the page table:
 * once we release i_mmap_rwsem, another task can do the final put_page
 * and that page table be reused and filled with junk.
 */
flush_tlb_range(vma, start, end);
mmu_notifier_invalidate_range_end(mm, start, end);
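/*
 * 'pages' counts changed huge PTEs; shifting by the huge page order
 * converts that into the count of base pages the caller expects.
 */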
return pages << h->order;
/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
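/*
 * Note: vma_shareable() requires a VM_MAYSHARE mapping whose extent
 * covers the full PUD_SIZE-aligned range around 'addr'; anything else
 * falls back to an ordinary private pmd allocation below.
 */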
if (!vma_shareable(vma, addr))
    return (pte_t *)pmd_alloc(mm, pud, addr);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
    if (svma == vma)
        continue;
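    /*
     * Sketch of the rest of the walk, following huge_pmd_share() in
     * ~3.x/4.x mm/hugetlb.c; assumes 'saddr' and 'pte_t *spte = NULL'
     * are declared above. Look for another VMA mapping the same file
     * offset whose pmd page can be shared, and pin that page.
     */
    saddr = page_table_shareable(svma, vma, addr, idx);
    if (saddr) {
        spte = huge_pte_offset(svma->vm_mm, saddr);
        if (spte) {
            get_page(virt_to_page(spte)); /* pin the shared pmd page */
            break;
        }
    }
}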