2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/mm/highmem.c
@@ -31,6 +31,10 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
+
 /*
  * Virtual_count is not a pure "count".
  * 0 means that it is not mapped, and has not been mapped
@@ -104,7 +108,9 @@
 atomic_long_t _totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(_totalhigh_pages);
 
-unsigned int __nr_free_highpages (void)
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
+unsigned int nr_free_highpages (void)
 {
         struct zone *zone;
         unsigned int pages = 0;
@@ -141,7 +147,7 @@
                 do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
-struct page *__kmap_to_page(void *vaddr)
+struct page *kmap_to_page(void *vaddr)
 {
         unsigned long addr = (unsigned long)vaddr;
 
@@ -152,7 +158,7 @@
 
         return virt_to_page(addr);
 }
-EXPORT_SYMBOL(__kmap_to_page);
+EXPORT_SYMBOL(kmap_to_page);
 
 static void flush_all_zero_pkmaps(void)
 {
@@ -194,7 +200,10 @@
         flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
-void __kmap_flush_unused(void)
+/**
+ * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+ */
+void kmap_flush_unused(void)
 {
         lock_kmap();
         flush_all_zero_pkmaps();
@@ -358,250 +367,9 @@
         if (need_wakeup)
                 wake_up(pkmap_map_wait);
 }
+
 EXPORT_SYMBOL(kunmap_high);
-#endif /* CONFIG_HIGHMEM */
-
-#ifdef CONFIG_KMAP_LOCAL
-
-#include <asm/kmap_size.h>
-
-/*
- * With DEBUG_HIGHMEM the stack depth is doubled and every second
- * slot is unused which acts as a guard page
- */
-#ifdef CONFIG_DEBUG_HIGHMEM
-# define KM_INCR 2
-#else
-# define KM_INCR 1
-#endif
-
-static inline int kmap_local_idx_push(void)
-{
-        WARN_ON_ONCE(in_irq() && !irqs_disabled());
-        current->kmap_ctrl.idx += KM_INCR;
-        BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
-        return current->kmap_ctrl.idx - 1;
-}
-
-static inline int kmap_local_idx(void)
-{
-        return current->kmap_ctrl.idx - 1;
-}
-
-static inline void kmap_local_idx_pop(void)
-{
-        current->kmap_ctrl.idx -= KM_INCR;
-        BUG_ON(current->kmap_ctrl.idx < 0);
-}
-
-#ifndef arch_kmap_local_post_map
-# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
-#endif
-
-#ifndef arch_kmap_local_pre_unmap
-# define arch_kmap_local_pre_unmap(vaddr) do { } while (0)
-#endif
-
-#ifndef arch_kmap_local_post_unmap
-# define arch_kmap_local_post_unmap(vaddr) do { } while (0)
-#endif
-
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx)
-#endif
-
-#ifndef arch_kmap_local_unmap_idx
-#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx)
-#endif
-
-#ifndef arch_kmap_local_high_get
-static inline void *arch_kmap_local_high_get(struct page *page)
-{
-        return NULL;
-}
-#endif
-
-/* Unmap a local mapping which was obtained by kmap_high_get() */
-static inline bool kmap_high_unmap_local(unsigned long vaddr)
-{
-#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-        if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
-                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
-                return true;
-        }
-#endif
-        return false;
-}
-
-static inline int kmap_local_calc_idx(int idx)
-{
-        return idx + KM_MAX_IDX * smp_processor_id();
-}
-
-static pte_t *__kmap_pte;
-
-static pte_t *kmap_get_pte(void)
-{
-        if (!__kmap_pte)
-                __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-        return __kmap_pte;
-}
-
-void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
-{
-        pte_t pteval, *kmap_pte = kmap_get_pte();
-        unsigned long vaddr;
-        int idx;
-
-        /*
-         * Disable migration so resulting virtual address is stable
-         * accross preemption.
-         */
-        migrate_disable();
-        preempt_disable();
-        idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        BUG_ON(!pte_none(*(kmap_pte - idx)));
-        pteval = pfn_pte(pfn, prot);
-        set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
-        arch_kmap_local_post_map(vaddr, pteval);
-        current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
-        preempt_enable();
-
-        return (void *)vaddr;
-}
-EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
-
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
-{
-        void *kmap;
-
-        if (!PageHighMem(page))
-                return page_address(page);
-
-        /* Try kmap_high_get() if architecture has it enabled */
-        kmap = arch_kmap_local_high_get(page);
-        if (kmap)
-                return kmap;
-
-        return __kmap_local_pfn_prot(page_to_pfn(page), prot);
-}
-EXPORT_SYMBOL(__kmap_local_page_prot);
-
-void kunmap_local_indexed(void *vaddr)
-{
-        unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-        pte_t *kmap_pte = kmap_get_pte();
-        int idx;
-
-        if (addr < __fix_to_virt(FIX_KMAP_END) ||
-            addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
-                /*
-                 * Handle mappings which were obtained by kmap_high_get()
-                 * first as the virtual address of such mappings is below
-                 * PAGE_OFFSET. Warn for all other addresses which are in
-                 * the user space part of the virtual address space.
-                 */
-                if (!kmap_high_unmap_local(addr))
-                        WARN_ON_ONCE(addr < PAGE_OFFSET);
-                return;
-        }
-
-        preempt_disable();
-        idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
-        WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-        arch_kmap_local_pre_unmap(addr);
-        pte_clear(&init_mm, addr, kmap_pte - idx);
-        arch_kmap_local_post_unmap(addr);
-        current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
-        kmap_local_idx_pop();
-        preempt_enable();
-        migrate_enable();
-}
-EXPORT_SYMBOL(kunmap_local_indexed);
-
-/*
- * Invoked before switch_to(). This is safe even when during or after
- * clearing the maps an interrupt which needs a kmap_local happens because
- * the task::kmap_ctrl.idx is not modified by the unmapping code so a
- * nested kmap_local will use the next unused index and restore the index
- * on unmap. The already cleared kmaps of the outgoing task are irrelevant
- * because the interrupt context does not know about them. The same applies
- * when scheduling back in for an interrupt which happens before the
- * restore is complete.
- */
-void __kmap_local_sched_out(void)
-{
-        struct task_struct *tsk = current;
-        pte_t *kmap_pte = kmap_get_pte();
-        int i;
-
-        /* Clear kmaps */
-        for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
-                pte_t pteval = tsk->kmap_ctrl.pteval[i];
-                unsigned long addr;
-                int idx;
-
-                /* With debug all even slots are unmapped and act as guard */
-                if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
-                        WARN_ON_ONCE(!pte_none(pteval));
-                        continue;
-                }
-                if (WARN_ON_ONCE(pte_none(pteval)))
-                        continue;
-
-                /*
-                 * This is a horrible hack for XTENSA to calculate the
-                 * coloured PTE index. Uses the PFN encoded into the pteval
-                 * and the map index calculation because the actual mapped
-                 * virtual address is not stored in task::kmap_ctrl.
-                 * For any sane architecture this is optimized out.
-                 */
-                idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
-
-                addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-                arch_kmap_local_pre_unmap(addr);
-                pte_clear(&init_mm, addr, kmap_pte - idx);
-                arch_kmap_local_post_unmap(addr);
-        }
-}
-
-void __kmap_local_sched_in(void)
-{
-        struct task_struct *tsk = current;
-        pte_t *kmap_pte = kmap_get_pte();
-        int i;
-
-        /* Restore kmaps */
-        for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
-                pte_t pteval = tsk->kmap_ctrl.pteval[i];
-                unsigned long addr;
-                int idx;
-
-                /* With debug all even slots are unmapped and act as guard */
-                if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
-                        WARN_ON_ONCE(!pte_none(pteval));
-                        continue;
-                }
-                if (WARN_ON_ONCE(pte_none(pteval)))
-                        continue;
-
-                /* See comment in __kmap_local_sched_out() */
-                idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
-                addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-                set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
-                arch_kmap_local_post_map(addr, pteval);
-        }
-}
-
-void kmap_local_fork(struct task_struct *tsk)
-{
-        if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
-                memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
-}
-
-#endif
+#endif /* CONFIG_HIGHMEM */
 
 #if defined(HASHED_PAGE_VIRTUAL)
 
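
Note (not part of the diff above): a minimal usage sketch of the un-prefixed helpers this change exposes, assuming a CONFIG_HIGHMEM build where kmap(), kunmap(), kmap_to_page() and kmap_flush_unused() are available via <linux/highmem.h>; the demo function name is hypothetical.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Illustrative only: map a (possibly highmem) page, recover the struct page
 * from the kernel virtual address with kmap_to_page(), then unmap and flush
 * any pkmap entries whose use count has dropped to zero.
 */
static void kmap_to_page_demo(struct page *page)
{
        void *vaddr = kmap(page);                 /* may sleep; uses pkmap for highmem pages */
        struct page *back = kmap_to_page(vaddr);  /* vaddr -> struct page, handles the pkmap range */

        WARN_ON(back != page);
        kunmap(page);

        kmap_flush_unused();    /* drop unused pkmap mappings and flush their TLB entries */
}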