forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/arch/arm64/include/asm/cpufeature.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */

 #ifndef __ASM_CPUFEATURE_H
@@ -14,15 +11,8 @@
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>

-/*
- * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
- * in the kernel and for user space to keep track of which optional features
- * are supported by the current system. So let's map feature 'x' to HWCAP_x.
- * Note that HWCAP_x constants are bit fields so we need to take the log.
- */
-
-#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
-#define cpu_feature(x) ilog2(HWCAP_ ## x)
+#define MAX_CPU_FEATURES 64
+#define cpu_feature(x) KERNEL_HWCAP_ ## x

 #ifndef __ASSEMBLY__

@@ -73,6 +63,11 @@
 	s64 safe_val; /* safe value for FTR_EXACT features */
 };

+struct arm64_ftr_override {
+	u64 val;
+	u64 mask;
+};
+
 /*
  * @arm64_ftr_reg - Feature register
  * @strict_mask Bits which should match across all CPUs for sanity.
@@ -84,6 +79,7 @@
 	u64 user_mask;
 	u64 sys_val;
 	u64 user_val;
+	struct arm64_ftr_override *override;
 	const struct arm64_ftr_bits *ftr_bits;
 };

@@ -218,6 +214,10 @@
  * In some non-typical cases either both (a) and (b), or neither,
  * should be permitted. This can be described by including neither
  * or both flags in the capability's type field.
+ *
+ * In case of a conflict, the CPU is prevented from booting. If the
+ * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
+ * then a kernel panic is triggered.
  */


@@ -250,6 +250,8 @@
 #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
 /* Is it safe for a late CPU to miss this capability when system has it */
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
+/* Panic when a conflict is detected */
+#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))

 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -263,7 +265,7 @@
 /*
  * CPU feature detected at boot time based on system-wide value of a
  * feature. It is safe for a late CPU to have this feature even though
- * the system hasn't enabled it, although the featuer will not be used
+ * the system hasn't enabled it, although the feature will not be used
  * by Linux in this case. If the system has enabled this feature already,
  * then every late CPU must have it.
  */
@@ -272,6 +274,8 @@
 /*
  * CPU feature detected at boot time based on feature of one or more CPUs.
  * All possible conflicts for a late CPU are ignored.
+ * NOTE: this means that a late CPU with the feature will *not* cause the
+ * capability to be advertised by cpus_have_*cap()!
  */
 #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
 	(ARM64_CPUCAP_SCOPE_LOCAL_CPU			|	\
@@ -289,9 +293,20 @@

 /*
  * CPU feature used early in the boot based on the boot CPU. All secondary
- * CPUs must match the state of the capability as detected by the boot CPU.
+ * CPUs must match the state of the capability as detected by the boot CPU. In
+ * case of a conflict, a kernel panic is triggered.
  */
-#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
+	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
+
+/*
+ * CPU feature used early in the boot based on the boot CPU. It is safe for a
+ * late CPU to have this feature even though the boot CPU hasn't enabled it,
+ * although the feature will not be used by Linux in this case. If the boot CPU
+ * has enabled this feature already, then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_BOOT_CPU_FEATURE			\
+	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

 struct arm64_cpu_capabilities {
 	const char *desc;
@@ -299,9 +314,16 @@
 	u16 type;
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
 	/*
-	 * Take the appropriate actions to enable this capability for this CPU.
-	 * For each successfully booted CPU, this method is called for each
-	 * globally detected capability.
+	 * Take the appropriate actions to configure this capability
+	 * for this CPU. If the capability is detected by the kernel
+	 * this will be called on all the CPUs in the system,
+	 * including the hotplugged CPUs, regardless of whether the
+	 * capability is available on that specific CPU. This is
+	 * useful for some capabilities (e.g, working around CPU
+	 * errata), where all the CPUs must take some action (e.g,
+	 * changing system control/configuration). Thus, if an action
+	 * is required only if the CPU has the capability, then the
+	 * routine must check it before taking any action.
	 */
 	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
 	union {
@@ -322,19 +344,20 @@
 			bool sign;
 			unsigned long hwcap;
 		};
-		/*
-		 * A list of "matches/cpu_enable" pair for the same
-		 * "capability" of the same "type" as described by the parent.
-		 * Only matches(), cpu_enable() and fields relevant to these
-		 * methods are significant in the list. The cpu_enable is
-		 * invoked only if the corresponding entry "matches()".
-		 * However, if a cpu_enable() method is associated
-		 * with multiple matches(), care should be taken that either
-		 * the match criteria are mutually exclusive, or that the
-		 * method is robust against being called multiple times.
-		 */
-		const struct arm64_cpu_capabilities *match_list;
 	};
+
+	/*
+	 * An optional list of "matches/cpu_enable" pair for the same
+	 * "capability" of the same "type" as described by the parent.
+	 * Only matches(), cpu_enable() and fields relevant to these
+	 * methods are significant in the list. The cpu_enable is
+	 * invoked only if the corresponding entry "matches()".
+	 * However, if a cpu_enable() method is associated
+	 * with multiple matches(), care should be taken that either
+	 * the match criteria are mutually exclusive, or that the
+	 * method is robust against being called multiple times.
+	 */
+	const struct arm64_cpu_capabilities *match_list;
 };

 static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
@@ -342,37 +365,71 @@
 	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
 }

+/*
+ * Generic helper for handling capabilities with multiple (match,enable) pairs
+ * of call backs, sharing the same capability bit.
+ * Iterate over each entry to see if at least one matches.
+ */
 static inline bool
-cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
+			       int scope)
 {
-	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+	const struct arm64_cpu_capabilities *caps;
+
+	for (caps = entry->match_list; caps->matches; caps++)
+		if (caps->matches(caps, scope))
+			return true;
+
+	return false;
 }

-static inline bool
-cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+static __always_inline bool is_vhe_hyp_code(void)
 {
-	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+	/* Only defined for code run in VHE hyp context */
+	return __is_defined(__KVM_VHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_nvhe_hyp_code(void)
+{
+	/* Only defined for code run in NVHE hyp context */
+	return __is_defined(__KVM_NVHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_hyp_code(void)
+{
+	return is_vhe_hyp_code() || is_nvhe_hyp_code();
 }

 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;

+/* ARM64 CAPS + alternative_cb */
+#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
+#define for_each_available_cap(cap)	\
+	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
+
 bool this_cpu_has_cap(unsigned int cap);
+void cpu_set_feature(unsigned int num);
+bool cpu_have_feature(unsigned int num);
+unsigned long cpu_get_elf_hwcap(void);
+unsigned long cpu_get_elf_hwcap2(void);

-static inline bool cpu_have_feature(unsigned int num)
+#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
+#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
+
+static __always_inline bool system_capabilities_finalized(void)
 {
-	return elf_hwcap & (1UL << num);
+	return static_branch_likely(&arm64_const_caps_ready);
 }

-/* System capability check for constant caps */
-static inline bool __cpus_have_const_cap(int num)
-{
-	if (num >= ARM64_NCAPS)
-		return false;
-	return static_branch_unlikely(&cpu_hwcap_keys[num]);
-}
-
+/*
+ * Test for a capability with a runtime check.
+ *
+ * Before the capability is detected, this returns false.
+ */
 static inline bool cpus_have_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS)
@@ -380,9 +437,53 @@
 	return test_bit(num, cpu_hwcaps);
 }

-static inline bool cpus_have_const_cap(int num)
+/*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this returns false.
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool __cpus_have_const_cap(int num)
 {
-	if (static_branch_likely(&arm64_const_caps_ready))
+	if (num >= ARM64_NCAPS)
+		return false;
+	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
+/*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this will BUG().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool cpus_have_final_cap(int num)
+{
+	if (system_capabilities_finalized())
+		return __cpus_have_const_cap(num);
+	else
+		BUG();
+}
+
+/*
+ * Test for a capability, possibly with a runtime check for non-hyp code.
+ *
+ * For hyp code, this behaves the same as cpus_have_final_cap().
+ *
+ * For non-hyp code:
+ * Before capabilities are finalized, this behaves as cpus_have_cap().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool cpus_have_const_cap(int num)
+{
+	if (is_hyp_code())
+		return cpus_have_final_cap(num);
+	else if (system_capabilities_finalized())
 		return __cpus_have_const_cap(num);
 	else
 		return cpus_have_cap(num);
@@ -410,16 +511,39 @@
 	return cpuid_feature_extract_signed_field_width(features, field, 4);
 }

-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
 	return (u64)(features << (64 - width - field)) >> (64 - width);
 }

-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+}
+
+/*
+ * Fields that identify the version of the Performance Monitors Extension do
+ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
+ * "Alternative ID scheme used for the Performance Monitors Extension version".
+ */
+static inline u64 __attribute_const__
+cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
+{
+	u64 val = cpuid_feature_extract_unsigned_field(features, field);
+	u64 mask = GENMASK_ULL(field + 3, field);
+
+	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
+	if (val == 0xf)
+		val = 0;
+
+	if (val > cap) {
+		features &= ~mask;
+		features |= (cap << field) & mask;
+	}
+
+	return features;
 }

 static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
@@ -457,6 +581,13 @@
 		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
 }

+static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
+{
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
+
+	return val == ID_AA64PFR0_EL1_32BIT_64BIT;
+}
+
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
 	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
@@ -474,17 +605,91 @@
 void __init setup_cpu_features(void);
 void check_local_cpu_capabilities(void);

-
 u64 read_sanitised_ftr_reg(u32 id);
+u64 __read_sysreg_by_encoding(u32 sys_id);

 static inline bool cpu_supports_mixed_endian_el0(void)
 {
 	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }

+
+static inline bool supports_csv2p3(int scope)
+{
+	u64 pfr0;
+	u8 csv2_val;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
+	else
+		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
+							ID_AA64PFR0_CSV2_SHIFT);
+	return csv2_val == 3;
+}
+
+static inline bool supports_clearbhb(int scope)
+{
+	u64 isar2;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+	else
+		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
+
+	return cpuid_feature_extract_unsigned_field(isar2,
+						    ID_AA64ISAR2_CLEARBHB_SHIFT);
+}
+
+const struct cpumask *system_32bit_el0_cpumask(void);
+DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
 static inline bool system_supports_32bit_el0(void)
 {
-	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+	       id_aa64pfr0_32bit_el0(pfr0);
+}
+
+static inline bool system_supports_4kb_granule(void)
+{
+	u64 mmfr0;
+	u32 val;
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	val = cpuid_feature_extract_unsigned_field(mmfr0,
+						ID_AA64MMFR0_TGRAN4_SHIFT);
+
+	return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
+}
+
+static inline bool system_supports_64kb_granule(void)
+{
+	u64 mmfr0;
+	u32 val;
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	val = cpuid_feature_extract_unsigned_field(mmfr0,
+						ID_AA64MMFR0_TGRAN64_SHIFT);
+
+	return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
+}
+
+static inline bool system_supports_16kb_granule(void)
+{
+	u64 mmfr0;
+	u32 val;
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	val = cpuid_feature_extract_unsigned_field(mmfr0,
+						ID_AA64MMFR0_TGRAN16_SHIFT);
+
+	return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
 }

 static inline bool system_supports_mixed_endian_el0(void)
@@ -492,7 +697,19 @@
 	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
 }

-static inline bool system_supports_fpsimd(void)
+static inline bool system_supports_mixed_endian(void)
+{
+	u64 mmfr0;
+	u32 val;
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	val = cpuid_feature_extract_unsigned_field(mmfr0,
+						ID_AA64MMFR0_BIGENDEL_SHIFT);
+
+	return val == 0x1;
+}
+
+static __always_inline bool system_supports_fpsimd(void)
 {
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -503,29 +720,127 @@
 		!cpus_have_const_cap(ARM64_HAS_PAN);
 }

-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SVE) &&
 		cpus_have_const_cap(ARM64_SVE);
 }

-#define ARM64_SSBD_UNKNOWN		-1
-#define ARM64_SSBD_FORCE_DISABLE	0
-#define ARM64_SSBD_KERNEL		1
-#define ARM64_SSBD_FORCE_ENABLE		2
-#define ARM64_SSBD_MITIGATED		3
-
-static inline int arm64_get_ssbd_state(void)
+static __always_inline bool system_supports_cnp(void)
 {
-#ifdef CONFIG_ARM64_SSBD
-	extern int ssbd_state;
-	return ssbd_state;
-#else
-	return ARM64_SSBD_UNKNOWN;
-#endif
+	return IS_ENABLED(CONFIG_ARM64_CNP) &&
+		cpus_have_const_cap(ARM64_HAS_CNP);
 }

-void arm64_set_ssbd_mitigation(bool state);
+static inline bool system_supports_address_auth(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
+		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
+}
+
+static inline bool system_supports_generic_auth(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
+		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
+}
+
+static inline bool system_has_full_ptr_auth(void)
+{
+	return system_supports_address_auth() && system_supports_generic_auth();
+}
+
+static __always_inline bool system_uses_irq_prio_masking(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+}
+
+static inline bool system_supports_mte(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_MTE) &&
+		cpus_have_const_cap(ARM64_MTE);
+}
+
+static inline bool system_has_prio_mask_debugging(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
+	       system_uses_irq_prio_masking();
+}
+
+static inline bool system_supports_bti(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
+}
+
+static inline bool system_supports_tlb_range(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
+		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+}
+
+extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
+
+static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
+{
+	switch (parange) {
+	case 0: return 32;
+	case 1: return 36;
+	case 2: return 40;
+	case 3: return 42;
+	case 4: return 44;
+	case 5: return 48;
+	case 6: return 52;
+	/*
+	 * A future PE could use a value unknown to the kernel.
+	 * However, by the "D10.1.4 Principles of the ID scheme
+	 * for fields in ID registers", ARM DDI 0487C.a, any new
+	 * value is guaranteed to be higher than what we know already.
+	 * As a safe limit, we return the limit supported by the kernel.
+	 */
+	default: return CONFIG_ARM64_PA_BITS;
+	}
+}
+
+/* Check whether hardware update of the Access flag is supported */
+static inline bool cpu_has_hw_af(void)
+{
+	u64 mmfr1;
+
+	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
+		return false;
+
+	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	return cpuid_feature_extract_unsigned_field(mmfr1,
+						ID_AA64MMFR1_HADBS_SHIFT);
+}
+
+#ifdef CONFIG_ARM64_AMU_EXTN
+/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
+extern bool cpu_has_amu_feat(int cpu);
+#endif
+
+static inline unsigned int get_vmid_bits(u64 mmfr1)
+{
+	int vmid_bits;
+
+	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
+						ID_AA64MMFR1_VMIDBITS_SHIFT);
+	if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
+		return 16;
+
+	/*
+	 * Return the default here even if any reserved
+	 * value is fetched from the system register.
+	 */
+	return 8;
+}
+
+extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64isar1_override;
+
+u32 get_kvm_ipa_limit(void);
+void dump_cpu_features(void);

 #endif /* __ASSEMBLY__ */

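A minimal usage sketch of the reworked capability and hwcap interfaces this diff introduces; it is illustrative only and not part of the commit. It assumes a kernel translation unit, and uses ARM64_HAS_CNP (from <asm/cpucaps.h>) and the FP hwcap (from <asm/hwcap.h>) purely as example identifiers.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/printk.h>
#include <asm/cpufeature.h>

static void example_cpufeature_usage(void)
{
	/* Bitmap test with a runtime check; safe before caps are finalized. */
	if (cpus_have_cap(ARM64_HAS_CNP))
		pr_info("CNP detected on this system\n");

	/* Static-key test; BUG()s if used before capabilities are finalized. */
	if (system_capabilities_finalized() && cpus_have_final_cap(ARM64_HAS_CNP))
		pr_info("CNP check patched in, no runtime test needed\n");

	/* ELF hwcaps are now indexed by KERNEL_HWCAP_* via cpu_feature(). */
	if (cpu_have_named_feature(FP))
		pr_info("FP hwcap advertised to userspace\n");
}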