Commit | Line | Data |
---|---|---|
14cf11af PM |
1 | /* |
2 | * PowerPC version | |
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
4 | * | |
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | |
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | |
7 | * Copyright (C) 1996 Paul Mackerras | |
8 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | |
9 | * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) | |
10 | * | |
11 | * Derived from "arch/i386/mm/init.c" | |
12 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | |
13 | * | |
14 | * This program is free software; you can redistribute it and/or | |
15 | * modify it under the terms of the GNU General Public License | |
16 | * as published by the Free Software Foundation; either version | |
17 | * 2 of the License, or (at your option) any later version. | |
18 | * | |
19 | */ | |
20 | ||
21 | #include <linux/config.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/sched.h> | |
24 | #include <linux/kernel.h> | |
25 | #include <linux/errno.h> | |
26 | #include <linux/string.h> | |
27 | #include <linux/types.h> | |
28 | #include <linux/mm.h> | |
29 | #include <linux/stddef.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/bootmem.h> | |
32 | #include <linux/highmem.h> | |
33 | #include <linux/initrd.h> | |
34 | #include <linux/pagemap.h> | |
35 | ||
36 | #include <asm/pgalloc.h> | |
37 | #include <asm/prom.h> | |
38 | #include <asm/io.h> | |
39 | #include <asm/mmu_context.h> | |
40 | #include <asm/pgtable.h> | |
41 | #include <asm/mmu.h> | |
42 | #include <asm/smp.h> | |
43 | #include <asm/machdep.h> | |
44 | #include <asm/btext.h> | |
45 | #include <asm/tlb.h> | |
46 | #include <asm/bootinfo.h> | |
47 | #include <asm/prom.h> | |
48 | ||
49 | #include "mem_pieces.h" | |
50 | #include "mmu_decl.h" | |
51 | ||
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

/* Per-CPU gather state used by the generic TLB flushing code (asm/tlb.h). */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long total_memory;	/* total bytes of RAM (lowmem + highmem) */
unsigned long total_lowmem;	/* bytes of RAM permanently mapped (lowmem) */

unsigned long ppc_memstart;	/* physical address at which RAM starts */
unsigned long ppc_memoffset = PAGE_OFFSET;	/* virtual-physical offset for lowmem */

int mem_init_done;		/* set once mem_init() has completed */
int init_bootmem_done;		/* set once do_init_bootmem() has completed */
int boot_mapsize;		/* bytes used by the bootmem bitmap */
#ifdef CONFIG_PPC_PMAC
unsigned long agp_special_page;	/* page kept out of AGP aperture; see set_phys_avail() */
#endif

/* Linker-script section boundaries of the kernel image. */
extern char _end[];
extern char etext[], _stext[];
extern char __init_begin, __init_end;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;	/* kernel PTE array backing the kmap area (set in paging_init) */
pgprot_t kmap_prot;	/* protection used for kmap mappings (set in paging_init) */

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
#endif

void MMU_init(void);
void set_phys_avail(unsigned long total_ram);

/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];

char *klimit = _end;	/* end of kernel image; used to size the reserved region */
struct mem_pieces phys_avail;	/* physical memory not yet handed out or reserved */
struct device_node *memory_node;	/* the /memory node of the device tree */

/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of RAM to use */
unsigned long __max_memory;
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
109 | ||
/*
 * Read in a property describing some pieces of memory.
 *
 * Parses a device-tree property of @memory_node named @name into @mp
 * as a sorted, coalesced list of {address, size} regions.  Entries are
 * (#address-cells + #size-cells) 32-bit cells each; regions located at
 * or extending beyond 4GB are skipped or clamped, since this 32-bit
 * port only tracks 32-bit physical addresses.
 *
 * Returns 1 on success, 0 if the property does not exist.
 */
static int __init get_mem_prop(char *name, struct mem_pieces *mp)
{
	struct reg_property *rp;
	int i, s;
	unsigned int *ip;
	int nac = prom_n_addr_cells(memory_node);	/* #address-cells */
	int nsc = prom_n_size_cells(memory_node);	/* #size-cells */

	ip = (unsigned int *) get_property(memory_node, name, &s);
	if (ip == NULL) {
		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
		       name);
		return 0;
	}
	/* s is the property length in bytes; each cell is 4 bytes. */
	s /= (nsc + nac) * 4;
	rp = mp->regions;
	for (i = 0; i < s; ++i, ip += nac+nsc) {
		/* Skip regions that start at or above 4GB (high cell nonzero). */
		if (nac >= 2 && ip[nac-2] != 0)
			continue;
		rp->address = ip[nac-1];	/* low 32 bits of the address */
		/* Clamp sizes that don't fit in 32 bits. */
		if (nsc >= 2 && ip[nac+nsc-2] != 0)
			rp->size = ~0U;
		else
			rp->size = ip[nac+nsc-1];
		++rp;
	}
	mp->n_regions = rp - mp->regions;

	/* Make sure the pieces are sorted. */
	mem_pieces_sort(mp);
	mem_pieces_coalesce(mp);
	return 1;
}
146 | ||
147 | /* | |
148 | * Collect information about physical RAM and which pieces are | |
149 | * already in use from the device tree. | |
150 | */ | |
151 | unsigned long __init find_end_of_memory(void) | |
152 | { | |
153 | unsigned long a, total; | |
154 | struct mem_pieces phys_mem; | |
155 | ||
156 | /* | |
157 | * Find out where physical memory is, and check that it | |
158 | * starts at 0 and is contiguous. It seems that RAM is | |
159 | * always physically contiguous on Power Macintoshes. | |
160 | * | |
161 | * Supporting discontiguous physical memory isn't hard, | |
162 | * it just makes the virtual <-> physical mapping functions | |
163 | * more complicated (or else you end up wasting space | |
164 | * in mem_map). | |
165 | */ | |
166 | memory_node = find_devices("memory"); | |
167 | if (memory_node == NULL || !get_mem_prop("reg", &phys_mem) | |
168 | || phys_mem.n_regions == 0) | |
169 | panic("No RAM??"); | |
170 | a = phys_mem.regions[0].address; | |
171 | if (a != 0) | |
172 | panic("RAM doesn't start at physical address 0"); | |
173 | total = phys_mem.regions[0].size; | |
174 | ||
175 | if (phys_mem.n_regions > 1) { | |
176 | printk("RAM starting at 0x%x is not contiguous\n", | |
177 | phys_mem.regions[1].address); | |
178 | printk("Using RAM from 0 to 0x%lx\n", total-1); | |
179 | } | |
180 | ||
181 | return total; | |
182 | } | |
183 | ||
/*
 * Check for command-line options that affect what MMU_init will do.
 *
 * Recognized options:
 *   nobats   - sets __map_without_bats (map RAM with page tables, not BATs)
 *   noltlbs  - sets __map_without_ltlbs
 *   mem=N[kKmM] - caps usable memory via __max_memory
 */
void MMU_setup(void)
{
	/* Check for nobats option (used in mapin_ram). */
	if (strstr(cmd_line, "nobats")) {
		__map_without_bats = 1;
	}

	/* Check for noltlbs option. */
	if (strstr(cmd_line, "noltlbs")) {
		__map_without_ltlbs = 1;
	}

	/* Look for mem= option on command line */
	if (strstr(cmd_line, "mem=")) {
		char *p, *q;
		unsigned long maxmem = 0;

		/*
		 * Scan all occurrences; the last word-starting "mem=" wins.
		 * q always advances past the current match, so the loop
		 * terminates even when a match is rejected below.
		 */
		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
			q = p + 4;
			/* Ignore "mem=" embedded inside another word. */
			if (p > cmd_line && p[-1] != ' ')
				continue;
			maxmem = simple_strtoul(q, &q, 0);
			if (*q == 'k' || *q == 'K') {
				maxmem <<= 10;	/* kilobytes */
				++q;
			} else if (*q == 'm' || *q == 'M') {
				maxmem <<= 20;	/* megabytes */
				++q;
			}
		}
		__max_memory = maxmem;
	}
}
219 | ||
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 *
 * Call order within this function matters: memory sizing must precede
 * set_phys_avail(), hardware init must precede mapin_ram(), and the
 * BootX text remap must be last or ppc_md.progress will die.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* parse args from command line */
	MMU_setup();

	/*
	 * Figure out how much memory we have, how much
	 * is lowmem, and how much is highmem.  If we were
	 * passed the total memory size from the bootloader,
	 * just use it.
	 */
	if (boot_mem_size)
		total_memory = boot_mem_size;
	else
		total_memory = find_end_of_memory();

	/* Apply the "mem=" command-line cap, if any (see MMU_setup). */
	if (__max_memory && total_memory > __max_memory)
		total_memory = __max_memory;
	total_lowmem = total_memory;
#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
#ifndef CONFIG_HIGHMEM
		/* Without highmem, RAM beyond lowmem is simply not used. */
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
	/* Publish lowmem (minus kernel image etc.) into phys_avail. */
	set_phys_avail(total_lowmem);

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

#ifdef CONFIG_HIGHMEM
	ioremap_base = PKMAP_BASE;
#else
	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
	ioremap_bot = ioremap_base;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);
	if (ppc_md.setup_io_mappings)
		ppc_md.setup_io_mappings();

	/* Initialize the context management stuff */
	mmu_context_init();

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

#ifdef CONFIG_BOOTX_TEXT
	/* By default, we are no longer mapped */
	boot_text_mapped = 0;
	/* Must be done last, or ppc_md.progress will die. */
	map_boot_text();
#endif
}
297 | ||
298 | /* This is only called until mem_init is done. */ | |
299 | void __init *early_get_page(void) | |
300 | { | |
301 | void *p; | |
302 | ||
303 | if (init_bootmem_done) { | |
304 | p = alloc_bootmem_pages(PAGE_SIZE); | |
305 | } else { | |
306 | p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE); | |
307 | } | |
308 | return p; | |
309 | } | |
310 | ||
311 | /* Free up now-unused memory */ | |
312 | static void free_sec(unsigned long start, unsigned long end, const char *name) | |
313 | { | |
314 | unsigned long cnt = 0; | |
315 | ||
316 | while (start < end) { | |
317 | ClearPageReserved(virt_to_page(start)); | |
318 | set_page_count(virt_to_page(start), 1); | |
319 | free_page(start); | |
320 | cnt++; | |
321 | start += PAGE_SIZE; | |
322 | } | |
323 | if (cnt) { | |
324 | printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name); | |
325 | totalram_pages += cnt; | |
326 | } | |
327 | } | |
328 | ||
329 | void free_initmem(void) | |
330 | { | |
331 | #define FREESEC(TYPE) \ | |
332 | free_sec((unsigned long)(&__ ## TYPE ## _begin), \ | |
333 | (unsigned long)(&__ ## TYPE ## _end), \ | |
334 | #TYPE); | |
335 | ||
336 | printk ("Freeing unused kernel memory:"); | |
337 | FREESEC(init); | |
338 | printk("\n"); | |
339 | ppc_md.progress = NULL; | |
340 | #undef FREESEC | |
341 | } | |
342 | ||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Give the pages of the initial ramdisk, [start, end), back to the
 * page allocator once its contents are no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *pg = virt_to_page(addr);

		ClearPageReserved(pg);
		set_page_count(pg, 1);	/* make it freeable */
		free_page(addr);
		totalram_pages++;
	}
}
#endif
356 | ||
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long start, size;
	int i;

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
	start = 0;
	size = 0;
	for (i = 0; i < phys_avail.n_regions; ++i) {
		unsigned long a = phys_avail.regions[i].address;
		unsigned long s = phys_avail.regions[i].size;
		/* Remember the largest region seen so far... */
		if (s <= size)
			continue;
		start = a;
		size = s;
		/* ...and stop early at the first one of 33 pages (128kB+1). */
		if (s >= 33 * PAGE_SIZE)
			break;
	}
	/* The bitmap must start on a page boundary. */
	start = PAGE_ALIGN(start);

	min_low_pfn = start >> PAGE_SHIFT;
	max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
	max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
					 PPC_MEMSTART >> PAGE_SHIFT,
					 max_low_pfn);

	/* remove the bootmem bitmap from the available memory */
	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);

	/* add everything in phys_avail into the bootmem map */
	for (i = 0; i < phys_avail.n_regions; ++i)
		free_bootmem(phys_avail.regions[i].address,
			     phys_avail.regions[i].size);

	init_bootmem_done = 1;
}
404 | ||
405 | /* | |
406 | * paging_init() sets up the page tables - in fact we've already done this. | |
407 | */ | |
408 | void __init paging_init(void) | |
409 | { | |
410 | unsigned long zones_size[MAX_NR_ZONES], i; | |
411 | ||
412 | #ifdef CONFIG_HIGHMEM | |
413 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ | |
414 | pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k | |
415 | (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE); | |
416 | map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */ | |
417 | kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k | |
418 | (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN); | |
419 | kmap_prot = PAGE_KERNEL; | |
420 | #endif /* CONFIG_HIGHMEM */ | |
421 | ||
422 | /* | |
423 | * All pages are DMA-able so we put them all in the DMA zone. | |
424 | */ | |
425 | zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT; | |
426 | for (i = 1; i < MAX_NR_ZONES; i++) | |
427 | zones_size[i] = 0; | |
428 | ||
429 | #ifdef CONFIG_HIGHMEM | |
430 | zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT; | |
431 | #endif /* CONFIG_HIGHMEM */ | |
432 | ||
433 | free_area_init(zones_size); | |
434 | } | |
435 | ||
/*
 * Final memory accounting at the end of boot: hand all bootmem pages
 * to the page allocator, reserve the special pages (RTAS, AGP), count
 * code/data/init pages for the boot banner, and free highmem pages.
 */
void __init mem_init(void)
{
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
#ifdef CONFIG_HIGHMEM
	unsigned long highmem_mapnr;	/* pfn where highmem begins */

	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
	max_mapnr = total_memory >> PAGE_SHIFT;

	high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	/* Release everything bootmem still holds to the buddy allocator. */
	totalram_pages += free_all_bootmem();

#ifdef CONFIG_BLK_DEV_INITRD
	/* if we are booted from BootX with an initial ramdisk,
	   make sure the ramdisk pages aren't reserved. */
	if (initrd_start) {
		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
			ClearPageReserved(virt_to_page(addr));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC_OF
	/* mark the RTAS pages as reserved */
	if ( rtas_data )
		for (addr = (ulong)__va(rtas_data);
		     addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(virt_to_page(addr));
#endif
#ifdef CONFIG_PPC_PMAC
	/* keep the page set aside for the r128 DRM driver reserved
	 * (see set_phys_avail) */
	if (agp_special_page)
		SetPageReserved(virt_to_page(agp_special_page));
#endif
	/* Classify each still-reserved lowmem page by kernel section. */
	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
	     addr += PAGE_SIZE) {
		if (!PageReserved(virt_to_page(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < (ulong) klimit)
			datapages++;
	}

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn;

		/* Hand every highmem page to the buddy allocator. */
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = mem_map + pfn;

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
	}
#endif /* CONFIG_HIGHMEM */

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
	       initpages<< (PAGE_SHIFT-10),
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

#ifdef CONFIG_PPC_PMAC
	if (agp_special_page)
		printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
#endif

	mem_init_done = 1;
}
517 | ||
518 | /* | |
519 | * Set phys_avail to the amount of physical memory, | |
520 | * less the kernel text/data/bss. | |
521 | */ | |
522 | void __init | |
523 | set_phys_avail(unsigned long total_memory) | |
524 | { | |
525 | unsigned long kstart, ksize; | |
526 | ||
527 | /* | |
528 | * Initially, available physical memory is equivalent to all | |
529 | * physical memory. | |
530 | */ | |
531 | ||
532 | phys_avail.regions[0].address = PPC_MEMSTART; | |
533 | phys_avail.regions[0].size = total_memory; | |
534 | phys_avail.n_regions = 1; | |
535 | ||
536 | /* | |
537 | * Map out the kernel text/data/bss from the available physical | |
538 | * memory. | |
539 | */ | |
540 | ||
541 | kstart = __pa(_stext); /* should be 0 */ | |
542 | ksize = PAGE_ALIGN(klimit - _stext); | |
543 | ||
544 | mem_pieces_remove(&phys_avail, kstart, ksize, 0); | |
545 | mem_pieces_remove(&phys_avail, 0, 0x4000, 0); | |
546 | ||
547 | #if defined(CONFIG_BLK_DEV_INITRD) | |
548 | /* Remove the init RAM disk from the available memory. */ | |
549 | if (initrd_start) { | |
550 | mem_pieces_remove(&phys_avail, __pa(initrd_start), | |
551 | initrd_end - initrd_start, 1); | |
552 | } | |
553 | #endif /* CONFIG_BLK_DEV_INITRD */ | |
554 | #ifdef CONFIG_PPC_OF | |
555 | /* remove the RTAS pages from the available memory */ | |
556 | if (rtas_data) | |
557 | mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1); | |
558 | #endif | |
559 | #ifdef CONFIG_PPC_PMAC | |
560 | /* Because of some uninorth weirdness, we need a page of | |
561 | * memory as high as possible (it must be outside of the | |
562 | * bus address seen as the AGP aperture). It will be used | |
563 | * by the r128 DRM driver | |
564 | * | |
565 | * FIXME: We need to make sure that page doesn't overlap any of the\ | |
566 | * above. This could be done by improving mem_pieces_find to be able | |
567 | * to do a backward search from the end of the list. | |
568 | */ | |
569 | if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) { | |
570 | agp_special_page = (total_memory - PAGE_SIZE); | |
571 | mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0); | |
572 | agp_special_page = (unsigned long)__va(agp_special_page); | |
573 | } | |
574 | #endif /* CONFIG_PPC_PMAC */ | |
575 | } | |
576 | ||
/* Mark some memory as reserved by removing it from phys_avail. */
void __init reserve_phys_mem(unsigned long start, unsigned long size)
{
	/* final arg 1: presumably "must exist" -- confirm in mem_pieces.c */
	mem_pieces_remove(&phys_avail, start, size, 1);
}