 .. |  .. |
 19 |  19 |  #include <asm/setup.h>
 20 |  20 |  #include <asm/segment.h>
 21 |  21 |  #include <asm/page.h>
 22 |     | -#include <asm/pgalloc.h>
 23 |  22 |  #include <asm/io.h>
    |  23 | +#include <asm/tlbflush.h>
 24 |  24 |
 25 |  25 |  #undef DEBUG
 26 |     | -
 27 |     | -#define PTRTREESIZE (256*1024)
 28 |  26 |
 29 |  27 |  /*
 30 |  28 |   * For 040/060 we can use the virtual memory area like other architectures,
 .. |  .. |
 50 |  48 |
 51 |  49 |  #else
 52 |  50 |
 53 |     | -#define IO_SIZE (256*1024)
    |  51 | +#define IO_SIZE PMD_SIZE
 54 |  52 |
 55 |  53 |  static struct vm_struct *iolist;
    |  54 | +
    |  55 | +/*
    |  56 | + * __free_io_area unmaps nearly everything, so be careful
    |  57 | + * Currently it doesn't free pointer/page tables anymore but this
    |  58 | + * wasn't used anyway and might be added later.
    |  59 | + */
    |  60 | +static void __free_io_area(void *addr, unsigned long size)
    |  61 | +{
    |  62 | +	unsigned long virtaddr = (unsigned long)addr;
    |  63 | +	pgd_t *pgd_dir;
    |  64 | +	p4d_t *p4d_dir;
    |  65 | +	pud_t *pud_dir;
    |  66 | +	pmd_t *pmd_dir;
    |  67 | +	pte_t *pte_dir;
    |  68 | +
    |  69 | +	while ((long)size > 0) {
    |  70 | +		pgd_dir = pgd_offset_k(virtaddr);
    |  71 | +		p4d_dir = p4d_offset(pgd_dir, virtaddr);
    |  72 | +		pud_dir = pud_offset(p4d_dir, virtaddr);
    |  73 | +		if (pud_bad(*pud_dir)) {
    |  74 | +			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
    |  75 | +			pud_clear(pud_dir);
    |  76 | +			return;
    |  77 | +		}
    |  78 | +		pmd_dir = pmd_offset(pud_dir, virtaddr);
    |  79 | +
    |  80 | +#if CONFIG_PGTABLE_LEVELS == 3
    |  81 | +		if (CPU_IS_020_OR_030) {
    |  82 | +			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;
    |  83 | +
    |  84 | +			if (pmd_type == _PAGE_PRESENT) {
    |  85 | +				pmd_clear(pmd_dir);
    |  86 | +				virtaddr += PMD_SIZE;
    |  87 | +				size -= PMD_SIZE;
---|
    |  88 | +				continue;
---|
    |  89 | +			} else if (pmd_type == 0)
    |  90 | +				continue;
    |  91 | +		}
    |  92 | +#endif
    |  93 | +
    |  94 | +		if (pmd_bad(*pmd_dir)) {
    |  95 | +			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
    |  96 | +			pmd_clear(pmd_dir);
    |  97 | +			return;
    |  98 | +		}
    |  99 | +		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
    | 100 | +
    | 101 | +		pte_val(*pte_dir) = 0;
    | 102 | +		virtaddr += PAGE_SIZE;
    | 103 | +		size -= PAGE_SIZE;
    | 104 | +	}
    | 105 | +
    | 106 | +	flush_tlb_all();
    | 107 | +}
 56 | 108 |
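The new __free_io_area() walk goes through all five generic page-table levels even though m68k folds p4d and pud away. A minimal sketch of that pattern outside the patch, using only the generic accessors seen above (the wrapper name demo_walk_kernel_pte is hypothetical, not part of the patch):

```c
#include <linux/mm.h>

/*
 * Illustration only: resolve the pte that maps a kernel virtual address
 * with the same pgd -> p4d -> pud -> pmd -> pte chain that
 * __free_io_area() uses above.
 */
static pte_t *demo_walk_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);	/* folded on m68k */
	pud_t *pud = pud_offset(p4d, vaddr);	/* folded on m68k */
	pmd_t *pmd;

	if (pud_bad(*pud))
		return NULL;

	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, vaddr);
}
```

On two- and three-level m68k configurations the p4d_offset()/pud_offset() calls reduce to pointer casts, which is why the patch can add them without changing the generated walk.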
---|
 57 | 109 |  static struct vm_struct *get_io_area(unsigned long size)
 58 | 110 |  {
 .. |  .. |
 90 | 142 |  		if (tmp->addr == addr) {
 91 | 143 |  			*p = tmp->next;
 92 | 144 |  			/* remove gap added in get_io_area() */
 93 |     | -			__iounmap(tmp->addr, tmp->size - IO_SIZE);
    | 145 | +			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
 94 | 146 |  			kfree(tmp);
 95 | 147 |  			return;
 96 | 148 |  		}
 .. |  .. |
110 | 162 |  	unsigned long virtaddr, retaddr;
111 | 163 |  	long offset;
112 | 164 |  	pgd_t *pgd_dir;
    | 165 | +	p4d_t *p4d_dir;
    | 166 | +	pud_t *pud_dir;
113 | 167 |  	pmd_t *pmd_dir;
114 | 168 |  	pte_t *pte_dir;
115 | 169 |
 .. |  .. |
192 | 246 |
193 | 247 |  	while ((long)size > 0) {
194 | 248 |  #ifdef DEBUG
195 |     | -		if (!(virtaddr & (PTRTREESIZE-1)))
    | 249 | +		if (!(virtaddr & (PMD_SIZE-1)))
196 | 250 |  			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
197 | 251 |  #endif
198 | 252 |  		pgd_dir = pgd_offset_k(virtaddr);
199 |     | -		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
    | 253 | +		p4d_dir = p4d_offset(pgd_dir, virtaddr);
    | 254 | +		pud_dir = pud_offset(p4d_dir, virtaddr);
    | 255 | +		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
200 | 256 |  		if (!pmd_dir) {
201 | 257 |  			printk("ioremap: no mem for pmd_dir\n");
202 | 258 |  			return NULL;
203 | 259 |  		}
204 | 260 |
    | 261 | +#if CONFIG_PGTABLE_LEVELS == 3
205 | 262 |  		if (CPU_IS_020_OR_030) {
206 |     | -			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
207 |     | -			physaddr += PTRTREESIZE;
208 |     | -			virtaddr += PTRTREESIZE;
209 |     | -			size -= PTRTREESIZE;
210 |     | -		} else {
    | 263 | +			pmd_val(*pmd_dir) = physaddr;
    | 264 | +			physaddr += PMD_SIZE;
    | 265 | +			virtaddr += PMD_SIZE;
    | 266 | +			size -= PMD_SIZE;
    | 267 | +		} else
    | 268 | +#endif
    | 269 | +		{
211 | 270 |  			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
212 | 271 |  			if (!pte_dir) {
213 | 272 |  				printk("ioremap: no mem for pte_dir\n");
 .. |  .. |
250 | 309 |  EXPORT_SYMBOL(iounmap);
251 | 310 |
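Callers are unaffected by the rename: a driver still pairs ioremap() with iounmap(), and on m68k the teardown now ends up in __free_io_area() shown earlier. A hedged usage sketch, where the device base address, region size and module name are invented for illustration:

```c
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

#define DEMO_MMIO_BASE	0x00f00000UL	/* hypothetical device address */
#define DEMO_MMIO_SIZE	0x1000

static void __iomem *demo_base;

static int __init demo_init(void)
{
	/* goes through the m68k ioremap() path patched above */
	demo_base = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
	if (!demo_base)
		return -ENOMEM;

	pr_info("demo: id register = %08x\n", readl(demo_base));
	return 0;
}

static void __exit demo_exit(void)
{
	/* released via iounmap() -> free_io_area() -> __free_io_area() */
	iounmap(demo_base);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```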
---|
252 | 311 |  /*
253 |     | - * __iounmap unmaps nearly everything, so be careful
254 |     | - * Currently it doesn't free pointer/page tables anymore but this
255 |     | - * wasn't used anyway and might be added later.
256 |     | - */
257 |     | -void __iounmap(void *addr, unsigned long size)
258 |     | -{
259 |     | -	unsigned long virtaddr = (unsigned long)addr;
260 |     | -	pgd_t *pgd_dir;
261 |     | -	pmd_t *pmd_dir;
262 |     | -	pte_t *pte_dir;
263 |     | -
264 |     | -	while ((long)size > 0) {
265 |     | -		pgd_dir = pgd_offset_k(virtaddr);
266 |     | -		if (pgd_bad(*pgd_dir)) {
267 |     | -			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
268 |     | -			pgd_clear(pgd_dir);
269 |     | -			return;
270 |     | -		}
271 |     | -		pmd_dir = pmd_offset(pgd_dir, virtaddr);
272 |     | -
273 |     | -		if (CPU_IS_020_OR_030) {
274 |     | -			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
275 |     | -			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
276 |     | -
277 |     | -			if (pmd_type == _PAGE_PRESENT) {
278 |     | -				pmd_dir->pmd[pmd_off] = 0;
279 |     | -				virtaddr += PTRTREESIZE;
280 |     | -				size -= PTRTREESIZE;
281 |     | -				continue;
282 |     | -			} else if (pmd_type == 0)
283 |     | -				continue;
284 |     | -		}
285 |     | -
286 |     | -		if (pmd_bad(*pmd_dir)) {
287 |     | -			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
288 |     | -			pmd_clear(pmd_dir);
289 |     | -			return;
290 |     | -		}
291 |     | -		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
292 |     | -
293 |     | -		pte_val(*pte_dir) = 0;
294 |     | -		virtaddr += PAGE_SIZE;
295 |     | -		size -= PAGE_SIZE;
296 |     | -	}
297 |     | -
298 |     | -	flush_tlb_all();
299 |     | -}
300 |     | -
301 |     | -/*
302 | 312 |   * Set new cache mode for some kernel address space.
303 | 313 |   * The caller must push data for that range itself, if such data may already
304 | 314 |   * be in the cache.
 .. |  .. |
307 | 317 |  {
308 | 318 |  	unsigned long virtaddr = (unsigned long)addr;
309 | 319 |  	pgd_t *pgd_dir;
    | 320 | +	p4d_t *p4d_dir;
    | 321 | +	pud_t *pud_dir;
310 | 322 |  	pmd_t *pmd_dir;
311 | 323 |  	pte_t *pte_dir;
312 | 324 |
 .. |  .. |
341 | 353 |
342 | 354 |  	while ((long)size > 0) {
343 | 355 |  		pgd_dir = pgd_offset_k(virtaddr);
344 |     | -		if (pgd_bad(*pgd_dir)) {
345 |     | -			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
346 |     | -			pgd_clear(pgd_dir);
    | 356 | +		p4d_dir = p4d_offset(pgd_dir, virtaddr);
    | 357 | +		pud_dir = pud_offset(p4d_dir, virtaddr);
    | 358 | +		if (pud_bad(*pud_dir)) {
    | 359 | +			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
    | 360 | +			pud_clear(pud_dir);
347 | 361 |  			return;
348 | 362 |  		}
349 |     | -		pmd_dir = pmd_offset(pgd_dir, virtaddr);
    | 363 | +		pmd_dir = pmd_offset(pud_dir, virtaddr);
350 | 364 |
    | 365 | +#if CONFIG_PGTABLE_LEVELS == 3
351 | 366 |  		if (CPU_IS_020_OR_030) {
352 |     | -			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
    | 367 | +			unsigned long pmd = pmd_val(*pmd_dir);
353 | 368 |
354 |     | -			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
355 |     | -				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
356 |     | -						 _CACHEMASK040) | cmode;
357 |     | -				virtaddr += PTRTREESIZE;
358 |     | -				size -= PTRTREESIZE;
    | 369 | +			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
    | 370 | +				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
    | 371 | +				virtaddr += PMD_SIZE;
    | 372 | +				size -= PMD_SIZE;
359 | 373 |  				continue;
360 | 374 |  			}
361 | 375 |  		}
    | 376 | +#endif
362 | 377 |
363 | 378 |  		if (pmd_bad(*pmd_dir)) {
364 | 379 |  			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
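The external contract of kernel_set_cachemode() is unchanged; only the 020/030 branch now updates the entry through pmd_val()/__pmd() instead of the old 16-entry pmd array. A hedged example of a caller, assuming the IOMAP_* cache-mode constants and the kernel_set_cachemode() prototype are visible through <asm/io.h> on m68k (the wrapper and range below are illustrative):

```c
#include <asm/io.h>	/* assumed to provide IOMAP_* and kernel_set_cachemode() */

/*
 * Illustration only: switch an already-mapped kernel range to
 * write-through caching.  Per the comment above kernel_set_cachemode(),
 * the caller must push any cached data for this range out beforehand.
 */
static void demo_set_writethrough(void *vaddr, unsigned long len)
{
	kernel_set_cachemode(vaddr, len, IOMAP_WRITETHROUGH);
}
```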