@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2016,2017 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) "xive: " fmt
@@ -20,7 +16,9 @@
 #include <linux/cpumask.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
+#include <linux/libfdt.h>
 
+#include <asm/machdep.h>
 #include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -29,6 +27,8 @@
 #include <asm/xive.h>
 #include <asm/xive-regs.h>
 #include <asm/hvcall.h>
+#include <asm/svm.h>
+#include <asm/ultravisor.h>
 
 #include "xive-internal.h"
 
@@ -48,7 +48,7 @@
 {
 	struct xive_irq_bitmap *xibm;
 
-	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
+	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
 	if (!xibm)
 		return -ENOMEM;
 
@@ -56,6 +56,10 @@
 	xibm->base = base;
 	xibm->count = count;
 	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
+	if (!xibm->bitmap) {
+		kfree(xibm);
+		return -ENOMEM;
+	}
 	list_add(&xibm->list, &xive_irq_bitmaps);
 
 	pr_info("Using IRQ range [%x-%x]", xibm->base,
@@ -210,6 +214,38 @@
 		       lisn, target, prio, rc);
 		return rc;
 	}
+
+	return 0;
+}
+
+static long plpar_int_get_source_config(unsigned long flags,
+					unsigned long lisn,
+					unsigned long *target,
+					unsigned long *prio,
+					unsigned long *sw_irq)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+
+	pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);
+
+	do {
+		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
+				 target, prio, sw_irq);
+	} while (plpar_busy_delay(rc));
+
+	if (rc) {
+		pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
+		       lisn, rc);
+		return rc;
+	}
+
+	*target = retbuf[0];
+	*prio = retbuf[1];
+	*sw_irq = retbuf[2];
+
+	pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
+		 retbuf[0], retbuf[1], retbuf[2]);
 
 	return 0;
 }
@@ -389,6 +425,7 @@
 
 	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
 	if (!data->trig_mmio) {
+		iounmap(data->eoi_mmio);
 		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
 		return -ENOMEM;
 	}
@@ -401,6 +438,24 @@
 
 	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
 					 prio, sw_irq);
+
+	return rc == 0 ? 0 : -ENXIO;
+}
+
+static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
+				     u32 *sw_irq)
+{
+	long rc;
+	unsigned long h_target;
+	unsigned long h_prio;
+	unsigned long h_sw_irq;
+
+	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
+					 &h_sw_irq);
+
+	*target = h_target;
+	*prio = h_prio;
+	*sw_irq = h_sw_irq;
 
 	return rc == 0 ? 0 : -ENXIO;
 }
@@ -450,6 +505,9 @@
 		rc = -EIO;
 	} else {
 		q->qpage = qpage;
+		if (is_secure_guest())
+			uv_share_page(PHYS_PFN(qpage_phys),
+					1 << xive_alloc_order(order));
 	}
 fail:
 	return rc;
@@ -483,6 +541,8 @@
 		       hw_cpu, prio);
 
 	alloc_order = xive_alloc_order(xive_queue_shift);
+	if (is_secure_guest())
+		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
 	free_pages((unsigned long)q->qpage, alloc_order);
 	q->qpage = NULL;
 }
@@ -594,9 +654,28 @@
 	plpar_int_sync(0, hw_irq);
 }
 
+static int xive_spapr_debug_show(struct seq_file *m, void *private)
+{
+	struct xive_irq_bitmap *xibm;
+	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
+		memset(buf, 0, PAGE_SIZE);
+		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
+		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
+	}
+	kfree(buf);
+
+	return 0;
+}
+
 static const struct xive_ops xive_spapr_ops = {
 	.populate_irq_data = xive_spapr_populate_irq_data,
 	.configure_irq = xive_spapr_configure_irq,
+	.get_irq_config = xive_spapr_get_irq_config,
 	.setup_queue = xive_spapr_setup_queue,
 	.cleanup_queue = xive_spapr_cleanup_queue,
 	.match = xive_spapr_match,
@@ -610,6 +689,7 @@
 #ifdef CONFIG_SMP
 	.get_ipi = xive_spapr_get_ipi,
 	.put_ipi = xive_spapr_put_ipi,
+	.debug_show = xive_spapr_debug_show,
 #endif /* CONFIG_SMP */
 	.name = "spapr",
 };
@@ -631,6 +711,7 @@
 	}
 
 	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
+	of_node_put(rootdn);
 	if (!reg) {
 		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
 		return false;
@@ -671,6 +752,55 @@
 	return true;
 }
 
+static const u8 *get_vec5_feature(unsigned int index)
+{
+	unsigned long root, chosen;
+	int size;
+	const u8 *vec5;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return NULL;
+
+	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
+	if (!vec5)
+		return NULL;
+
+	if (size <= index)
+		return NULL;
+
+	return vec5 + index;
+}
+
+static bool __init xive_spapr_disabled(void)
+{
+	const u8 *vec5_xive;
+
+	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
+	if (vec5_xive) {
+		u8 val;
+
+		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
+		switch (val) {
+		case OV5_FEAT(OV5_XIVE_EITHER):
+		case OV5_FEAT(OV5_XIVE_LEGACY):
+			break;
+		case OV5_FEAT(OV5_XIVE_EXPLOIT):
+			/* Hypervisor only supports XIVE */
+			if (xive_cmdline_disabled)
+				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
+			return false;
+		default:
+			pr_warn("%s: Unknown xive support option: 0x%x\n",
+				__func__, val);
+			break;
+		}
+	}
+
+	return xive_cmdline_disabled;
+}
+
 bool __init xive_spapr_init(void)
 {
 	struct device_node *np;
@@ -683,7 +813,7 @@
 	const __be32 *reg;
 	int i;
 
-	if (xive_cmdline_disabled)
+	if (xive_spapr_disabled())
 		return false;
 
 	pr_devel("%s()\n", __func__);
@@ -738,3 +868,5 @@
 	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
 	return true;
 }
+
+machine_arch_initcall(pseries, xive_core_debug_init);