@@ -40,13 +40,19 @@
 	 * membars, in order to allow proper address translation during
 	 * resource assignment to enable guest virtualization
 	 */
-	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),
+	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),
 
 	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
-	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
+	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),
+
+	/*
+	 * Device contains physical location shadow registers in
+	 * vendor-specific capability space
+	 */
+	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),
 };
 
 /*
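
Note: each bit in this enum reaches the driver through the .driver_data field
of the matching vmd_ids[] entry (see the device table hunk at the end of this
patch); vmd_probe() receives it via its pci_device_id argument and hands it to
vmd_enable_domain() as the "features" word. A minimal sketch of that plumbing,
where the cast and the pci_info() are illustrative assumptions rather than
lines from this patch:

	static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
	{
		unsigned long features = (unsigned long) id->driver_data;

		/* e.g. 8086:201d sets only VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP */
		if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP)
			pci_info(dev, "shadow MEMBAR bases in vendor-specific capability\n");

		return 0;
	}
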
@@ -98,11 +104,6 @@
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
 	u8			busn_start;
-
-#ifdef CONFIG_X86_DEV_DMA_OPS
-	struct dma_map_ops	dma_ops;
-	struct dma_domain	dma_domain;
-#endif
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -297,173 +298,32 @@
 	.chip		= &vmd_msi_controller,
 };
 
-#ifdef CONFIG_X86_DEV_DMA_OPS
-/*
- * VMD replaces the requester ID with its own. DMA mappings for devices in a
- * VMD domain need to be mapped for the VMD, not the device requiring
- * the mapping.
- */
-static struct device *to_vmd_dev(struct device *dev)
+static int vmd_create_irq_domain(struct vmd_dev *vmd)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+	struct fwnode_handle *fn;
 
-	return &vmd->dev->dev;
+	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+	if (!fn)
+		return -ENODEV;
+
+	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+	if (!vmd->irq_domain) {
+		irq_domain_free_fwnode(fn);
+		return -ENODEV;
+	}
+
+	return 0;
 }
 
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
+static void vmd_remove_irq_domain(struct vmd_dev *vmd)
 {
-	return get_dma_ops(to_vmd_dev(dev));
+	if (vmd->irq_domain) {
+		struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+		irq_domain_remove(vmd->irq_domain);
+		irq_domain_free_fwnode(fn);
+	}
 }
-
-static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
-		       gfp_t flag, unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
-				       attrs);
-}
-
-static void vmd_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t addr, unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
-				      attrs);
-}
-
-static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t addr, size_t size,
-		    unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
-				      size, attrs);
-}
-
-static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
-			   void *cpu_addr, dma_addr_t addr, size_t size,
-			   unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
-					     addr, size, attrs);
-}
-
-static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
-					  dir, attrs);
-}
-
-static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
-			   enum dma_data_direction dir, unsigned long attrs)
-{
-	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
-}
-
-static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction dir, unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			 enum dma_data_direction dir, unsigned long attrs)
-{
-	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
-				       size_t size, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
-						 dir);
-}
-
-static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
-{
-	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
-}
-
-static int vmd_dma_supported(struct device *dev, u64 mask)
-{
-	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
-}
-
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-static u64 vmd_get_required_mask(struct device *dev)
-{
-	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
-}
-#endif
-
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
-{
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	if (get_dma_ops(&vmd->dev->dev))
-		del_dma_domain(domain);
-}
-
-#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
-	do {					\
-		if (source->fn)			\
-			dest->fn = vmd_##fn;	\
-	} while (0)
-
-static void vmd_setup_dma_ops(struct vmd_dev *vmd)
-{
-	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
-	struct dma_map_ops *dest = &vmd->dma_ops;
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	domain->domain_nr = vmd->sysdata.domain;
-	domain->dma_ops = dest;
-
-	if (!source)
-		return;
-	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
-	ASSIGN_VMD_DMA_OPS(source, dest, free);
-	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
-	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-#endif
-	add_dma_domain(domain);
-}
-#undef ASSIGN_VMD_DMA_OPS
-#else
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
-static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
-#endif
 
 static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
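
Note: the removed forwarding shim existed because, as its comment said, VMD
replaces the requester ID of child devices with its own, so DMA had to be
mapped against the VMD endpoint rather than the child. That redirection is
now handled generically through pci_sysdata: this patch stores the VMD
endpoint in sd->vmd_dev (see the vmd_enable_domain() hunk below), and the x86
PCI code hands it back to IOMMU drivers as the device to map for. A rough
sketch of that helper, on the assumption that it matches the arch/x86
pci_real_dma_dev() merged alongside this change:

	/* Return the device whose requester ID actually appears on the bus. */
	struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
	{
		struct pci_sysdata *sd = dev->bus->sysdata;

		return sd->vmd_dev ? sd->vmd_dev : dev;
	}

With that in place, every per-operation trampoline above (vmd_alloc() through
vmd_get_required_mask()) and the dma_domain registration become dead weight
and can be deleted wholesale.
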
@@ -584,15 +444,148 @@
 	return domain + 1;
 }
 
+static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
+				resource_size_t *offset1,
+				resource_size_t *offset2)
+{
+	struct pci_dev *dev = vmd->dev;
+	u64 phys1, phys2;
+
+	if (native_hint) {
+		u32 vmlock;
+		int ret;
+
+		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
+		if (ret || vmlock == ~0)
+			return -ENODEV;
+
+		if (MB2_SHADOW_EN(vmlock)) {
+			void __iomem *membar2;
+
+			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
+			if (!membar2)
+				return -ENOMEM;
+			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
+			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
+			pci_iounmap(dev, membar2);
+		} else
+			return 0;
+	} else {
+		/* Hypervisor-Emulated Vendor-Specific Capability */
+		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+		u32 reg, regu;
+
+		pci_read_config_dword(dev, pos + 4, &reg);
+
+		/* "SHDW" */
+		if (pos && reg == 0x53484457) {
+			pci_read_config_dword(dev, pos + 8, &reg);
+			pci_read_config_dword(dev, pos + 12, &regu);
+			phys1 = (u64) regu << 32 | reg;
+
+			pci_read_config_dword(dev, pos + 16, &reg);
+			pci_read_config_dword(dev, pos + 20, &regu);
+			phys2 = (u64) regu << 32 | reg;
+		} else
+			return 0;
+	}
+
+	*offset1 = dev->resource[VMD_MEMBAR1].start -
+			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
+	*offset2 = dev->resource[VMD_MEMBAR2].start -
+			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);
+
+	return 0;
+}
+
+static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	u16 reg;
+
+	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
+	if (BUS_RESTRICT_CAP(reg)) {
+		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);
+
+		switch (BUS_RESTRICT_CFG(reg)) {
+		case 0:
+			vmd->busn_start = 0;
+			break;
+		case 1:
+			vmd->busn_start = 128;
+			break;
+		case 2:
+			vmd->busn_start = 224;
+			break;
+		default:
+			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
+				BUS_RESTRICT_CFG(reg));
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+	struct vmd_irq_list *irqs = data;
+	struct vmd_irq *vmdirq;
+	int idx;
+
+	idx = srcu_read_lock(&irqs->srcu);
+	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+		generic_handle_irq(vmdirq->virq);
+	srcu_read_unlock(&irqs->srcu, idx);
+
+	return IRQ_HANDLED;
+}
+
+static int vmd_alloc_irqs(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	int i, err;
+
+	vmd->msix_count = pci_msix_vec_count(dev);
+	if (vmd->msix_count < 0)
+		return -ENODEV;
+
+	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+						PCI_IRQ_MSIX);
+	if (vmd->msix_count < 0)
+		return vmd->msix_count;
+
+	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+				 GFP_KERNEL);
+	if (!vmd->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		err = init_srcu_struct(&vmd->irqs[i].srcu);
+		if (err)
+			return err;
+
+		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
-	struct fwnode_handle *fn;
 	struct resource *res;
 	u32 upper_bits;
 	unsigned long flags;
 	LIST_HEAD(resources);
 	resource_size_t offset[2] = {0};
 	resource_size_t membar2_offset = 0x2000;
+	struct pci_bus *child;
+	int ret;
 
 	/*
@@ -602,42 +595,24 @@
	 * or 0, depending on an enable bit in the VMD device.
	 */
 	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
-		u32 vmlock;
-		int ret;
-
 		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
-		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
-		if (ret || vmlock == ~0)
-			return -ENODEV;
-
-		if (MB2_SHADOW_EN(vmlock)) {
-			void __iomem *membar2;
-
-			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
-			if (!membar2)
-				return -ENOMEM;
-			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-					(readq(membar2 + MB2_SHADOW_OFFSET) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-					(readq(membar2 + MB2_SHADOW_OFFSET + 8) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-			pci_iounmap(vmd->dev, membar2);
-		}
+		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
+		if (ret)
+			return ret;
+	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
+		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
+		if (ret)
+			return ret;
 	}
 
 	/*
	 * Certain VMD devices may have a root port configuration option which
-	 * limits the bus range to between 0-127 or 128-255
+	 * limits the bus range to between 0-127, 128-255, or 224-255
	 */
 	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
-		u32 vmcap, vmconfig;
-
-		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
-		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
-		if (BUS_RESTRICT_CAP(vmcap) &&
-		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
-			vmd->busn_start = 128;
+		ret = vmd_get_bus_number_start(vmd);
+		if (ret)
+			return ret;
 	}
 
 	res = &vmd->dev->resource[VMD_CFGBAR];
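
Note: both branches of vmd_get_phys_offsets() end in the same translation:
the difference between where the guest sees a MEMBAR and where that window
sits in host-physical memory, which is exactly what pci_add_resource_offset()
consumes later in vmd_enable_domain(). (The magic 0x53484457 tested in the
VSCAP branch is simply the ASCII bytes 'S' 'H' 'D' 'W' packed into a dword.)
A worked example with made-up addresses:

	static resource_size_t example_membar_offset(void)
	{
		/* All values hypothetical, for illustration only. */
		resource_size_t guest_bar = 0xe0000000;	/* dev->resource[VMD_MEMBAR1].start */
		u64 host_phys = 0xb0000000;		/* shadow register contents */

		/* 0xe0000000 - 0xb0000000 == 0x30000000; child BARs shift by this much */
		return guest_bar - (host_phys & PCI_BASE_ADDRESS_MEM_MASK);
	}
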
@@ -658,7 +633,7 @@
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
-	 * The only way we could use a 64-bit non-prefechable MEMBAR is
+	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
@@ -691,65 +666,61 @@
 		.parent = res,
 	};
 
-	sd->vmd_domain = true;
+	sd->vmd_dev = vmd->dev;
 	sd->domain = vmd_find_free_domain();
 	if (sd->domain < 0)
 		return sd->domain;
 
 	sd->node = pcibus_to_node(vmd->dev->bus);
 
-	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
-	if (!fn)
-		return -ENODEV;
+	ret = vmd_create_irq_domain(vmd);
+	if (ret)
+		return ret;
 
-	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
-						    x86_vector_domain);
-	if (!vmd->irq_domain) {
-		irq_domain_free_fwnode(fn);
-		return -ENODEV;
-	}
+	/*
+	 * Override the irq domain bus token so the domain can be distinguished
+	 * from a regular PCI/MSI domain.
+	 */
+	irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
 
 	pci_add_resource(&resources, &vmd->resources[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
 
 	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
-				       &vmd_ops, sd, &resources);
+					&vmd_ops, sd, &resources);
 	if (!vmd->bus) {
 		pci_free_resource_list(&resources);
-		irq_domain_remove(vmd->irq_domain);
-		irq_domain_free_fwnode(fn);
+		vmd_remove_irq_domain(vmd);
 		return -ENODEV;
 	}
 
 	vmd_attach_resources(vmd);
-	vmd_setup_dma_ops(vmd);
-	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
-	pci_rescan_bus(vmd->bus);
+	if (vmd->irq_domain)
+		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+
+	pci_scan_child_bus(vmd->bus);
+	pci_assign_unassigned_bus_resources(vmd->bus);
+
+	/*
+	 * VMD root buses are virtual and don't return true on pci_is_pcie()
+	 * and will fail pcie_bus_configure_settings() early. It can instead be
+	 * run on each of the real root ports.
+	 */
+	list_for_each_entry(child, &vmd->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(vmd->bus);
 
 	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
 			       "domain"), "Can't create symlink to domain\n");
 	return 0;
 }
 
-static irqreturn_t vmd_irq(int irq, void *data)
-{
-	struct vmd_irq_list *irqs = data;
-	struct vmd_irq *vmdirq;
-	int idx;
-
-	idx = srcu_read_lock(&irqs->srcu);
-	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
-		generic_handle_irq(vmdirq->virq);
-	srcu_read_unlock(&irqs->srcu, idx);
-
-	return IRQ_HANDLED;
-}
-
 static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct vmd_dev *vmd;
-	int i, err;
+	int err;
 
 	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
 		return -ENOMEM;
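
Note: vmd_irq() (moved above, otherwise unchanged) walks irq_list under
srcu_read_lock(), so the paths that add and remove entries, which fall outside
this excerpt, must use the RCU list primitives and wait out readers before an
entry may be freed. A representative pairing, sketched with an assumed lock
name rather than copied from the driver:

	static DEFINE_RAW_SPINLOCK(vmd_list_lock);	/* assumed name */

	static void vmd_list_attach(struct vmd_irq_list *irqs, struct vmd_irq *vmdirq)
	{
		raw_spin_lock(&vmd_list_lock);
		list_add_tail_rcu(&vmdirq->node, &irqs->irq_list);
		raw_spin_unlock(&vmd_list_lock);
	}

	static void vmd_list_detach(struct vmd_irq_list *irqs, struct vmd_irq *vmdirq)
	{
		raw_spin_lock(&vmd_list_lock);
		list_del_rcu(&vmdirq->node);
		raw_spin_unlock(&vmd_list_lock);
		synchronize_srcu(&irqs->srcu);	/* wait for vmd_irq() readers */
	}
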
@@ -772,32 +743,9 @@
 	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
-	vmd->msix_count = pci_msix_vec_count(dev);
-	if (vmd->msix_count < 0)
-		return -ENODEV;
-
-	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
-					PCI_IRQ_MSIX);
-	if (vmd->msix_count < 0)
-		return vmd->msix_count;
-
-	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
-				 GFP_KERNEL);
-	if (!vmd->irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < vmd->msix_count; i++) {
-		err = init_srcu_struct(&vmd->irqs[i].srcu);
-		if (err)
-			return err;
-
-		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
-				       vmd_irq, IRQF_NO_THREAD,
-				       "vmd", &vmd->irqs[i]);
-		if (err)
-			return err;
-	}
+	err = vmd_alloc_irqs(vmd);
+	if (err)
+		return err;
 
 	spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
@@ -821,16 +769,13 @@
 static void vmd_remove(struct pci_dev *dev)
 {
 	struct vmd_dev *vmd = pci_get_drvdata(dev);
-	struct fwnode_handle *fn = vmd->irq_domain->fwnode;
 
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
 	vmd_cleanup_srcu(vmd);
-	vmd_teardown_dma_ops(vmd);
 	vmd_detach_resources(vmd);
-	irq_domain_remove(vmd->irq_domain);
-	irq_domain_free_fwnode(fn);
+	vmd_remove_irq_domain(vmd);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -841,9 +786,8 @@
 	int i;
 
 	for (i = 0; i < vmd->msix_count; i++)
-                devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
 
-	pci_save_state(pdev);
 	return 0;
 }
@@ -861,19 +805,26 @@
 			return err;
 	}
 
-	pci_restore_state(pdev);
 	return 0;
 }
 #endif
 static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
 
 static const struct pci_device_id vmd_ids[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
 		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
-		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{0,}
 };
 MODULE_DEVICE_TABLE(pci, vmd_ids);