.. | .. |
---|
35 | 35 | */ |
---|
36 | 36 | |
---|
37 | 37 | #include <linux/slab.h> |
---|
| 38 | + |
---|
38 | 39 | #include "i915_drv.h" |
---|
| 40 | +#include "gt/intel_ring.h" |
---|
39 | 41 | #include "gvt.h" |
---|
40 | 42 | #include "i915_pvinfo.h" |
---|
41 | 43 | #include "trace.h" |
---|
.. | .. |
---|
55 | 57 | int low; |
---|
56 | 58 | }; |
---|
57 | 59 | struct decode_info { |
---|
58 | | - char *name; |
---|
| 60 | + const char *name; |
---|
59 | 61 | int op_len; |
---|
60 | 62 | int nr_sub_op; |
---|
61 | | - struct sub_op_bits *sub_op; |
---|
| 63 | + const struct sub_op_bits *sub_op; |
---|
62 | 64 | }; |
---|
63 | 65 | |
---|
64 | 66 | #define MAX_CMD_BUDGET 0x7fffffff |
---|
.. | .. |
---|
162 | 164 | #define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01) |
---|
163 | 165 | #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02) |
---|
164 | 166 | #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04) |
---|
| 167 | +#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03) |
---|
165 | 168 | |
---|
166 | 169 | #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B) |
---|
167 | 170 | |
---|
.. | .. |
---|
374 | 377 | #define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4)) |
---|
375 | 378 | #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5)) |
---|
376 | 379 | |
---|
| 380 | +#define DWORD_FIELD(dword, end, start) \ |
---|
| 381 | + FIELD_GET(GENMASK(end, start), cmd_val(s, dword)) |
---|
| 382 | + |
---|
| 383 | +#define OP_LENGTH_BIAS 2 |
---|
| 384 | +#define CMD_LEN(value) (value + OP_LENGTH_BIAS) |
---|
| 385 | + |
---|
| 386 | +static int gvt_check_valid_cmd_length(int len, int valid_len) |
---|
| 387 | +{ |
---|
| 388 | + if (valid_len != len) { |
---|
| 389 | + gvt_err("len is not valid: len=%u valid_len=%u\n", |
---|
| 390 | + len, valid_len); |
---|
| 391 | + return -EFAULT; |
---|
| 392 | + } |
---|
| 393 | + return 0; |
---|
| 394 | +} |
---|
| 395 | + |
---|
377 | 396 | struct cmd_info { |
---|
378 | | - char *name; |
---|
| 397 | + const char *name; |
---|
379 | 398 | u32 opcode; |
---|
380 | 399 | |
---|
381 | | -#define F_LEN_MASK (1U<<0) |
---|
| 400 | +#define F_LEN_MASK 3U |
---|
382 | 401 | #define F_LEN_CONST 1U |
---|
383 | 402 | #define F_LEN_VAR 0U |
---|
| 403 | +/* value is const although LEN maybe variable */ |
---|
| 404 | +#define F_LEN_VAR_FIXED (1<<1) |
---|
384 | 405 | |
---|
385 | 406 | /* |
---|
386 | 407 | * command has its own ip advance logic |
---|
387 | 408 | * e.g. MI_BATCH_START, MI_BATCH_END |
---|
388 | 409 | */ |
---|
389 | | -#define F_IP_ADVANCE_CUSTOM (1<<1) |
---|
390 | | - |
---|
391 | | -#define F_POST_HANDLE (1<<2) |
---|
| 410 | +#define F_IP_ADVANCE_CUSTOM (1<<2) |
---|
392 | 411 | u32 flag; |
---|
393 | 412 | |
---|
394 | | -#define R_RCS (1 << RCS) |
---|
395 | | -#define R_VCS1 (1 << VCS) |
---|
396 | | -#define R_VCS2 (1 << VCS2) |
---|
| 413 | +#define R_RCS BIT(RCS0) |
---|
| 414 | +#define R_VCS1 BIT(VCS0) |
---|
| 415 | +#define R_VCS2 BIT(VCS1) |
---|
397 | 416 | #define R_VCS (R_VCS1 | R_VCS2) |
---|
398 | | -#define R_BCS (1 << BCS) |
---|
399 | | -#define R_VECS (1 << VECS) |
---|
| 417 | +#define R_BCS BIT(BCS0) |
---|
| 418 | +#define R_VECS BIT(VECS0) |
---|
400 | 419 | #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) |
---|
401 | 420 | /* rings that support this cmd: BLT/RCS/VCS/VECS */ |
---|
402 | | - uint16_t rings; |
---|
| 421 | + u16 rings; |
---|
403 | 422 | |
---|
404 | 423 | /* devices that support this cmd: SNB/IVB/HSW/... */ |
---|
405 | | - uint16_t devices; |
---|
| 424 | + u16 devices; |
---|
406 | 425 | |
---|
407 | 426 | /* which DWords are address that need fix up. |
---|
408 | 427 | * bit 0 means a 32-bit non address operand in command |
---|
.. | .. |
---|
412 | 431 | * No matter the address length, each address only takes |
---|
413 | 432 | * one bit in the bitmap. |
---|
414 | 433 | */ |
---|
415 | | - uint16_t addr_bitmap; |
---|
| 434 | + u16 addr_bitmap; |
---|
416 | 435 | |
---|
417 | 436 | /* flag == F_LEN_CONST : command length |
---|
418 | 437 | * flag == F_LEN_VAR : length bias bits |
---|
419 | 438 | * Note: length is in DWord |
---|
420 | 439 | */ |
---|
421 | | - uint8_t len; |
---|
| 440 | + u32 len; |
---|
422 | 441 | |
---|
423 | 442 | parser_cmd_handler handler; |
---|
| 443 | + |
---|
| 444 | + /* valid length in DWord */ |
---|
| 445 | + u32 valid_len; |
---|
424 | 446 | }; |
---|
425 | 447 | |
---|
426 | 448 | struct cmd_entry { |
---|
427 | 449 | struct hlist_node hlist; |
---|
428 | | - struct cmd_info *info; |
---|
| 450 | + const struct cmd_info *info; |
---|
429 | 451 | }; |
---|
430 | 452 | |
---|
431 | 453 | enum { |
---|
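
Note on the hunk above: besides constifying the name/sub-op tables, it introduces a per-command length check. F_LEN_MASK grows to two bits so F_LEN_VAR_FIXED can mark commands whose length field is variable but whose layout is fixed; such entries carry a `valid_len`, written through CMD_LEN(), which adds the usual 2-DWord bias of the hardware DWord-count field, and cmd_parser_exec() rejects any command whose decoded length differs. Handlers of commands whose size depends on flag bits run the same check themselves. A minimal sketch of that handler-side pattern (the `example_*` helper and the flag bit checked are illustrative, not part of the patch):

```c
/* Sketch only: a handler validating a flag-dependent command length
 * with the helpers added above. The bit position is hypothetical. */
static int example_len_check_handler(struct parser_exec_state *s)
{
	u32 valid_len = CMD_LEN(1);	/* base layout: 3 DWords in total */

	if (DWORD_FIELD(0, 21, 21))	/* e.g. a "QWord payload" flag bit */
		valid_len++;		/* one extra operand DWord */

	return gvt_check_valid_cmd_length(cmd_length(s), valid_len);
}
```
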
.. | .. |
---|
441 | 463 | |
---|
442 | 464 | struct parser_exec_state { |
---|
443 | 465 | struct intel_vgpu *vgpu; |
---|
444 | | - int ring_id; |
---|
| 466 | + const struct intel_engine_cs *engine; |
---|
445 | 467 | |
---|
446 | 468 | int buf_type; |
---|
447 | 469 | |
---|
.. | .. |
---|
474 | 496 | int saved_buf_addr_type; |
---|
475 | 497 | bool is_ctx_wa; |
---|
476 | 498 | |
---|
477 | | - struct cmd_info *info; |
---|
| 499 | + const struct cmd_info *info; |
---|
478 | 500 | |
---|
479 | 501 | struct intel_vgpu_workload *workload; |
---|
480 | 502 | }; |
---|
.. | .. |
---|
485 | 507 | static unsigned long bypass_scan_mask = 0; |
---|
486 | 508 | |
---|
487 | 509 | /* ring ALL, type = 0 */ |
---|
488 | | -static struct sub_op_bits sub_op_mi[] = { |
---|
| 510 | +static const struct sub_op_bits sub_op_mi[] = { |
---|
489 | 511 | {31, 29}, |
---|
490 | 512 | {28, 23}, |
---|
491 | 513 | }; |
---|
492 | 514 | |
---|
493 | | -static struct decode_info decode_info_mi = { |
---|
| 515 | +static const struct decode_info decode_info_mi = { |
---|
494 | 516 | "MI", |
---|
495 | 517 | OP_LEN_MI, |
---|
496 | 518 | ARRAY_SIZE(sub_op_mi), |
---|
.. | .. |
---|
498 | 520 | }; |
---|
499 | 521 | |
---|
500 | 522 | /* ring RCS, command type 2 */ |
---|
501 | | -static struct sub_op_bits sub_op_2d[] = { |
---|
| 523 | +static const struct sub_op_bits sub_op_2d[] = { |
---|
502 | 524 | {31, 29}, |
---|
503 | 525 | {28, 22}, |
---|
504 | 526 | }; |
---|
505 | 527 | |
---|
506 | | -static struct decode_info decode_info_2d = { |
---|
| 528 | +static const struct decode_info decode_info_2d = { |
---|
507 | 529 | "2D", |
---|
508 | 530 | OP_LEN_2D, |
---|
509 | 531 | ARRAY_SIZE(sub_op_2d), |
---|
.. | .. |
---|
511 | 533 | }; |
---|
512 | 534 | |
---|
513 | 535 | /* ring RCS, command type 3 */ |
---|
514 | | -static struct sub_op_bits sub_op_3d_media[] = { |
---|
| 536 | +static const struct sub_op_bits sub_op_3d_media[] = { |
---|
515 | 537 | {31, 29}, |
---|
516 | 538 | {28, 27}, |
---|
517 | 539 | {26, 24}, |
---|
518 | 540 | {23, 16}, |
---|
519 | 541 | }; |
---|
520 | 542 | |
---|
521 | | -static struct decode_info decode_info_3d_media = { |
---|
| 543 | +static const struct decode_info decode_info_3d_media = { |
---|
522 | 544 | "3D_Media", |
---|
523 | 545 | OP_LEN_3D_MEDIA, |
---|
524 | 546 | ARRAY_SIZE(sub_op_3d_media), |
---|
.. | .. |
---|
526 | 548 | }; |
---|
527 | 549 | |
---|
528 | 550 | /* ring VCS, command type 3 */ |
---|
529 | | -static struct sub_op_bits sub_op_mfx_vc[] = { |
---|
| 551 | +static const struct sub_op_bits sub_op_mfx_vc[] = { |
---|
530 | 552 | {31, 29}, |
---|
531 | 553 | {28, 27}, |
---|
532 | 554 | {26, 24}, |
---|
.. | .. |
---|
534 | 556 | {20, 16}, |
---|
535 | 557 | }; |
---|
536 | 558 | |
---|
537 | | -static struct decode_info decode_info_mfx_vc = { |
---|
| 559 | +static const struct decode_info decode_info_mfx_vc = { |
---|
538 | 560 | "MFX_VC", |
---|
539 | 561 | OP_LEN_MFX_VC, |
---|
540 | 562 | ARRAY_SIZE(sub_op_mfx_vc), |
---|
.. | .. |
---|
542 | 564 | }; |
---|
543 | 565 | |
---|
544 | 566 | /* ring VECS, command type 3 */ |
---|
545 | | -static struct sub_op_bits sub_op_vebox[] = { |
---|
| 567 | +static const struct sub_op_bits sub_op_vebox[] = { |
---|
546 | 568 | {31, 29}, |
---|
547 | 569 | {28, 27}, |
---|
548 | 570 | {26, 24}, |
---|
.. | .. |
---|
550 | 572 | {20, 16}, |
---|
551 | 573 | }; |
---|
552 | 574 | |
---|
553 | | -static struct decode_info decode_info_vebox = { |
---|
| 575 | +static const struct decode_info decode_info_vebox = { |
---|
554 | 576 | "VEBOX", |
---|
555 | 577 | OP_LEN_VEBOX, |
---|
556 | 578 | ARRAY_SIZE(sub_op_vebox), |
---|
557 | 579 | sub_op_vebox, |
---|
558 | 580 | }; |
---|
559 | 581 | |
---|
560 | | -static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { |
---|
561 | | - [RCS] = { |
---|
| 582 | +static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { |
---|
| 583 | + [RCS0] = { |
---|
562 | 584 | &decode_info_mi, |
---|
563 | 585 | NULL, |
---|
564 | 586 | NULL, |
---|
.. | .. |
---|
569 | 591 | NULL, |
---|
570 | 592 | }, |
---|
571 | 593 | |
---|
572 | | - [VCS] = { |
---|
| 594 | + [VCS0] = { |
---|
573 | 595 | &decode_info_mi, |
---|
574 | 596 | NULL, |
---|
575 | 597 | NULL, |
---|
.. | .. |
---|
580 | 602 | NULL, |
---|
581 | 603 | }, |
---|
582 | 604 | |
---|
583 | | - [BCS] = { |
---|
| 605 | + [BCS0] = { |
---|
584 | 606 | &decode_info_mi, |
---|
585 | 607 | NULL, |
---|
586 | 608 | &decode_info_2d, |
---|
.. | .. |
---|
591 | 613 | NULL, |
---|
592 | 614 | }, |
---|
593 | 615 | |
---|
594 | | - [VECS] = { |
---|
| 616 | + [VECS0] = { |
---|
595 | 617 | &decode_info_mi, |
---|
596 | 618 | NULL, |
---|
597 | 619 | NULL, |
---|
.. | .. |
---|
602 | 624 | NULL, |
---|
603 | 625 | }, |
---|
604 | 626 | |
---|
605 | | - [VCS2] = { |
---|
| 627 | + [VCS1] = { |
---|
606 | 628 | &decode_info_mi, |
---|
607 | 629 | NULL, |
---|
608 | 630 | NULL, |
---|
.. | .. |
---|
614 | 636 | }, |
---|
615 | 637 | }; |
---|
616 | 638 | |
---|
617 | | -static inline u32 get_opcode(u32 cmd, int ring_id) |
---|
| 639 | +static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine) |
---|
618 | 640 | { |
---|
619 | | - struct decode_info *d_info; |
---|
| 641 | + const struct decode_info *d_info; |
---|
620 | 642 | |
---|
621 | | - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; |
---|
| 643 | + d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)]; |
---|
622 | 644 | if (d_info == NULL) |
---|
623 | 645 | return INVALID_OP; |
---|
624 | 646 | |
---|
625 | 647 | return cmd >> (32 - d_info->op_len); |
---|
626 | 648 | } |
---|
627 | 649 | |
---|
628 | | -static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, |
---|
629 | | - unsigned int opcode, int ring_id) |
---|
| 650 | +static inline const struct cmd_info * |
---|
| 651 | +find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode, |
---|
| 652 | + const struct intel_engine_cs *engine) |
---|
630 | 653 | { |
---|
631 | 654 | struct cmd_entry *e; |
---|
632 | 655 | |
---|
633 | 656 | hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { |
---|
634 | | - if ((opcode == e->info->opcode) && |
---|
635 | | - (e->info->rings & (1 << ring_id))) |
---|
| 657 | + if (opcode == e->info->opcode && |
---|
| 658 | + e->info->rings & engine->mask) |
---|
636 | 659 | return e->info; |
---|
637 | 660 | } |
---|
638 | 661 | return NULL; |
---|
639 | 662 | } |
---|
640 | 663 | |
---|
641 | | -static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt, |
---|
642 | | - u32 cmd, int ring_id) |
---|
| 664 | +static inline const struct cmd_info * |
---|
| 665 | +get_cmd_info(struct intel_gvt *gvt, u32 cmd, |
---|
| 666 | + const struct intel_engine_cs *engine) |
---|
643 | 667 | { |
---|
644 | 668 | u32 opcode; |
---|
645 | 669 | |
---|
646 | | - opcode = get_opcode(cmd, ring_id); |
---|
| 670 | + opcode = get_opcode(cmd, engine); |
---|
647 | 671 | if (opcode == INVALID_OP) |
---|
648 | 672 | return NULL; |
---|
649 | 673 | |
---|
650 | | - return find_cmd_entry(gvt, opcode, ring_id); |
---|
| 674 | + return find_cmd_entry(gvt, opcode, engine); |
---|
651 | 675 | } |
---|
652 | 676 | |
---|
653 | 677 | static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low) |
---|
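
For orientation, the decode step that get_cmd_info() drives works like this: the command-type bits of DWord 0 (CMD_TYPE) pick one of the per-engine decode tables above, the table's op_len says how many leading bits form the opcode, and find_cmd_entry() then filters the opcode hash by the engine's bit in cmd_info.rings, which lines up with intel_engine_cs->mask now that the R_* masks are BIT(RCS0) and friends. A hedged sketch of the extraction, assuming the MI table's two sub-op fields (31:29 and 28:23) make op_len nine bits; the `example_*` helper is illustrative only:

```c
/* Sketch: deriving the opcode of an MI command, mirroring get_opcode().
 * With decode_info_mi the opcode is the top nine bits of DWord 0
 * (client type in 31:29, MI opcode in 28:23). */
static u32 example_mi_opcode(u32 cmd)
{
	return cmd >> (32 - 9);
}
```
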
.. | .. |
---|
655 | 679 | return (cmd >> low) & ((1U << (hi - low + 1)) - 1); |
---|
656 | 680 | } |
---|
657 | 681 | |
---|
658 | | -static inline void print_opcode(u32 cmd, int ring_id) |
---|
| 682 | +static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine) |
---|
659 | 683 | { |
---|
660 | | - struct decode_info *d_info; |
---|
| 684 | + const struct decode_info *d_info; |
---|
661 | 685 | int i; |
---|
662 | 686 | |
---|
663 | | - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; |
---|
| 687 | + d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)]; |
---|
664 | 688 | if (d_info == NULL) |
---|
665 | 689 | return; |
---|
666 | 690 | |
---|
.. | .. |
---|
689 | 713 | int cnt = 0; |
---|
690 | 714 | int i; |
---|
691 | 715 | |
---|
692 | | - gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" |
---|
693 | | - " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, |
---|
694 | | - s->ring_id, s->ring_start, s->ring_start + s->ring_size, |
---|
695 | | - s->ring_head, s->ring_tail); |
---|
| 716 | + gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)" |
---|
| 717 | + " ring_head(%08lx) ring_tail(%08lx)\n", |
---|
| 718 | + s->vgpu->id, s->engine->name, |
---|
| 719 | + s->ring_start, s->ring_start + s->ring_size, |
---|
| 720 | + s->ring_head, s->ring_tail); |
---|
696 | 721 | |
---|
697 | 722 | gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", |
---|
698 | 723 | s->buf_type == RING_BUFFER_INSTRUCTION ? |
---|
.. | .. |
---|
709 | 734 | s->ip_va, cmd_val(s, 0), cmd_val(s, 1), |
---|
710 | 735 | cmd_val(s, 2), cmd_val(s, 3)); |
---|
711 | 736 | |
---|
712 | | - print_opcode(cmd_val(s, 0), s->ring_id); |
---|
| 737 | + print_opcode(cmd_val(s, 0), s->engine); |
---|
713 | 738 | |
---|
714 | 739 | s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); |
---|
715 | 740 | |
---|
.. | .. |
---|
776 | 801 | return 0; |
---|
777 | 802 | } |
---|
778 | 803 | |
---|
779 | | -static inline int get_cmd_length(struct cmd_info *info, u32 cmd) |
---|
| 804 | +static inline int get_cmd_length(const struct cmd_info *info, u32 cmd) |
---|
780 | 805 | { |
---|
781 | 806 | if ((info->flag & F_LEN_MASK) == F_LEN_CONST) |
---|
782 | 807 | return info->len; |
---|
.. | .. |
---|
820 | 845 | unsigned int data; |
---|
821 | 846 | u32 ring_base; |
---|
822 | 847 | u32 nopid; |
---|
823 | | - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
---|
824 | 848 | |
---|
825 | 849 | if (!strcmp(cmd, "lri")) |
---|
826 | 850 | data = cmd_val(s, index + 1); |
---|
.. | .. |
---|
830 | 854 | return -EINVAL; |
---|
831 | 855 | } |
---|
832 | 856 | |
---|
833 | | - ring_base = dev_priv->engine[s->ring_id]->mmio_base; |
---|
| 857 | + ring_base = s->engine->mmio_base; |
---|
834 | 858 | nopid = i915_mmio_reg_offset(RING_NOPID(ring_base)); |
---|
835 | 859 | |
---|
836 | 860 | if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) && |
---|
.. | .. |
---|
858 | 882 | return 0; |
---|
859 | 883 | } |
---|
860 | 884 | |
---|
| 885 | +static int is_cmd_update_pdps(unsigned int offset, |
---|
| 886 | + struct parser_exec_state *s) |
---|
| 887 | +{ |
---|
| 888 | + u32 base = s->workload->engine->mmio_base; |
---|
| 889 | + return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0)); |
---|
| 890 | +} |
---|
| 891 | + |
---|
| 892 | +static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s, |
---|
| 893 | + unsigned int offset, unsigned int index) |
---|
| 894 | +{ |
---|
| 895 | + struct intel_vgpu *vgpu = s->vgpu; |
---|
| 896 | + struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; |
---|
| 897 | + struct intel_vgpu_mm *mm; |
---|
| 898 | + u64 pdps[GEN8_3LVL_PDPES]; |
---|
| 899 | + |
---|
| 900 | + if (shadow_mm->ppgtt_mm.root_entry_type == |
---|
| 901 | + GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { |
---|
| 902 | + pdps[0] = (u64)cmd_val(s, 2) << 32; |
---|
| 903 | + pdps[0] |= cmd_val(s, 4); |
---|
| 904 | + |
---|
| 905 | + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); |
---|
| 906 | + if (!mm) { |
---|
| 907 | + gvt_vgpu_err("failed to get the 4-level shadow vm\n"); |
---|
| 908 | + return -EINVAL; |
---|
| 909 | + } |
---|
| 910 | + intel_vgpu_mm_get(mm); |
---|
| 911 | + list_add_tail(&mm->ppgtt_mm.link, |
---|
| 912 | + &s->workload->lri_shadow_mm); |
---|
| 913 | + *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]); |
---|
| 914 | + *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]); |
---|
| 915 | + } else { |
---|
| 916 | + /* Currently all guests use PML4 table and now can't |
---|
| 917 | + * have a guest with 3-level table but uses LRI for |
---|
| 918 | + * PPGTT update. So this is simply un-testable. */ |
---|
| 919 | + GEM_BUG_ON(1); |
---|
| 920 | + gvt_vgpu_err("invalid shared shadow vm type\n"); |
---|
| 921 | + return -EINVAL; |
---|
| 922 | + } |
---|
| 923 | + return 0; |
---|
| 924 | +} |
---|
| 925 | + |
---|
861 | 926 | static int cmd_reg_handler(struct parser_exec_state *s, |
---|
862 | 927 | unsigned int offset, unsigned int index, char *cmd) |
---|
863 | 928 | { |
---|
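
The new is_cmd_update_pdps()/cmd_pdp_mmio_update_handler() pair intercepts guests that switch PPGTT page tables with an LRI to the ring's PDP registers: the guest root pointer is rebuilt from the LRI payload, the matching guest PPGTT is looked up (and pinned onto the workload's lri_shadow_mm list), and the payload DWords are patched so the hardware loads the shadow PDPs instead. The handler assumes the conventional LRI layout below, with the UDW register/value pair preceding the LDW pair; this is a sketch of that assumption, not extra driver code:

```c
/*
 * Assumed MI_LOAD_REGISTER_IMM layout behind cmd_val(s, 2)/cmd_val(s, 4):
 *
 *   DW0: LRI header
 *   DW1: RING_PDP0_UDW offset   DW2: guest PDP[0] upper 32 bits
 *   DW3: RING_PDP0_LDW offset   DW4: guest PDP[0] lower 32 bits
 */
static u64 example_guest_pdp0(struct parser_exec_state *s)
{
	return ((u64)cmd_val(s, 2) << 32) | cmd_val(s, 4);
}
```
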
.. | .. |
---|
871 | 936 | return -EFAULT; |
---|
872 | 937 | } |
---|
873 | 938 | |
---|
874 | | - if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { |
---|
| 939 | + if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { |
---|
875 | 940 | gvt_vgpu_err("%s access to non-render register (%x)\n", |
---|
876 | 941 | cmd, offset); |
---|
877 | 942 | return -EBADRQC; |
---|
.. | .. |
---|
896 | 961 | patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); |
---|
897 | 962 | } |
---|
898 | 963 | |
---|
| 964 | + if (is_cmd_update_pdps(offset, s) && |
---|
| 965 | + cmd_pdp_mmio_update_handler(s, offset, index)) |
---|
| 966 | + return -EINVAL; |
---|
| 967 | + |
---|
899 | 968 | /* TODO |
---|
900 | | - * Right now only scan LRI command on KBL and in inhibit context. |
---|
901 | | - * It's good enough to support initializing mmio by lri command in |
---|
902 | | - * vgpu inhibit context on KBL. |
---|
| 969 | + * In order to let workload with inhibit context to generate |
---|
| 970 | + * correct image data into memory, vregs values will be loaded to |
---|
| 971 | + * hw via LRIs in the workload with inhibit context. But as |
---|
| 972 | + * indirect context is loaded prior to LRIs in workload, we don't |
---|
| 973 | + * want reg values specified in indirect context overwritten by |
---|
| 974 | + * LRIs in workloads. So, when scanning an indirect context, we |
---|
| 975 | + * update reg values in it into vregs, so LRIs in workload with |
---|
| 976 | + * inhibit context will restore with correct values |
---|
903 | 977 | */ |
---|
904 | | - if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) && |
---|
905 | | - intel_gvt_mmio_is_in_ctx(gvt, offset) && |
---|
906 | | - !strncmp(cmd, "lri", 3)) { |
---|
| 978 | + if (IS_GEN(s->engine->i915, 9) && |
---|
| 979 | + intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && |
---|
| 980 | + !strncmp(cmd, "lri", 3)) { |
---|
907 | 981 | intel_gvt_hypervisor_read_gpa(s->vgpu, |
---|
908 | 982 | s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); |
---|
909 | 983 | /* check inhibit context */ |
---|
.. | .. |
---|
918 | 992 | } |
---|
919 | 993 | } |
---|
920 | 994 | |
---|
921 | | - /* TODO: Update the global mask if this MMIO is a masked-MMIO */ |
---|
922 | | - intel_gvt_mmio_set_cmd_accessed(gvt, offset); |
---|
923 | 995 | return 0; |
---|
924 | 996 | } |
---|
925 | 997 | |
---|
.. | .. |
---|
939 | 1011 | { |
---|
940 | 1012 | int i, ret = 0; |
---|
941 | 1013 | int cmd_len = cmd_length(s); |
---|
942 | | - struct intel_gvt *gvt = s->vgpu->gvt; |
---|
943 | 1014 | |
---|
944 | 1015 | for (i = 1; i < cmd_len; i += 2) { |
---|
945 | | - if (IS_BROADWELL(gvt->dev_priv) && |
---|
946 | | - (s->ring_id != RCS)) { |
---|
947 | | - if (s->ring_id == BCS && |
---|
948 | | - cmd_reg(s, i) == |
---|
949 | | - i915_mmio_reg_offset(DERRMR)) |
---|
| 1016 | + if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { |
---|
| 1017 | + if (s->engine->id == BCS0 && |
---|
| 1018 | + cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) |
---|
950 | 1019 | ret |= 0; |
---|
951 | 1020 | else |
---|
952 | | - ret |= (cmd_reg_inhibit(s, i)) ? |
---|
953 | | - -EBADRQC : 0; |
---|
| 1021 | + ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; |
---|
954 | 1022 | } |
---|
955 | 1023 | if (ret) |
---|
956 | 1024 | break; |
---|
.. | .. |
---|
967 | 1035 | int cmd_len = cmd_length(s); |
---|
968 | 1036 | |
---|
969 | 1037 | for (i = 1; i < cmd_len; i += 2) { |
---|
970 | | - if (IS_BROADWELL(s->vgpu->gvt->dev_priv)) |
---|
| 1038 | + if (IS_BROADWELL(s->engine->i915)) |
---|
971 | 1039 | ret |= ((cmd_reg_inhibit(s, i) || |
---|
972 | | - (cmd_reg_inhibit(s, i + 1)))) ? |
---|
| 1040 | + (cmd_reg_inhibit(s, i + 1)))) ? |
---|
973 | 1041 | -EBADRQC : 0; |
---|
974 | 1042 | if (ret) |
---|
975 | 1043 | break; |
---|
.. | .. |
---|
995 | 1063 | int cmd_len = cmd_length(s); |
---|
996 | 1064 | |
---|
997 | 1065 | for (i = 1; i < cmd_len;) { |
---|
998 | | - if (IS_BROADWELL(gvt->dev_priv)) |
---|
| 1066 | + if (IS_BROADWELL(s->engine->i915)) |
---|
999 | 1067 | ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0; |
---|
1000 | 1068 | if (ret) |
---|
1001 | 1069 | break; |
---|
.. | .. |
---|
1046 | 1114 | }; |
---|
1047 | 1115 | |
---|
1048 | 1116 | static struct cmd_interrupt_event cmd_interrupt_events[] = { |
---|
1049 | | - [RCS] = { |
---|
| 1117 | + [RCS0] = { |
---|
1050 | 1118 | .pipe_control_notify = RCS_PIPE_CONTROL, |
---|
1051 | 1119 | .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, |
---|
1052 | 1120 | .mi_user_interrupt = RCS_MI_USER_INTERRUPT, |
---|
1053 | 1121 | }, |
---|
1054 | | - [BCS] = { |
---|
| 1122 | + [BCS0] = { |
---|
1055 | 1123 | .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, |
---|
1056 | 1124 | .mi_flush_dw = BCS_MI_FLUSH_DW, |
---|
1057 | 1125 | .mi_user_interrupt = BCS_MI_USER_INTERRUPT, |
---|
1058 | 1126 | }, |
---|
1059 | | - [VCS] = { |
---|
| 1127 | + [VCS0] = { |
---|
1060 | 1128 | .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, |
---|
1061 | 1129 | .mi_flush_dw = VCS_MI_FLUSH_DW, |
---|
1062 | 1130 | .mi_user_interrupt = VCS_MI_USER_INTERRUPT, |
---|
1063 | 1131 | }, |
---|
1064 | | - [VCS2] = { |
---|
| 1132 | + [VCS1] = { |
---|
1065 | 1133 | .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, |
---|
1066 | 1134 | .mi_flush_dw = VCS2_MI_FLUSH_DW, |
---|
1067 | 1135 | .mi_user_interrupt = VCS2_MI_USER_INTERRUPT, |
---|
1068 | 1136 | }, |
---|
1069 | | - [VECS] = { |
---|
| 1137 | + [VECS0] = { |
---|
1070 | 1138 | .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, |
---|
1071 | 1139 | .mi_flush_dw = VECS_MI_FLUSH_DW, |
---|
1072 | 1140 | .mi_user_interrupt = VECS_MI_USER_INTERRUPT, |
---|
.. | .. |
---|
1080 | 1148 | bool index_mode = false; |
---|
1081 | 1149 | unsigned int post_sync; |
---|
1082 | 1150 | int ret = 0; |
---|
| 1151 | + u32 hws_pga, val; |
---|
1083 | 1152 | |
---|
1084 | 1153 | post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14; |
---|
1085 | 1154 | |
---|
.. | .. |
---|
1103 | 1172 | index_mode = true; |
---|
1104 | 1173 | ret |= cmd_address_audit(s, gma, sizeof(u64), |
---|
1105 | 1174 | index_mode); |
---|
| 1175 | + if (ret) |
---|
| 1176 | + return ret; |
---|
| 1177 | + if (index_mode) { |
---|
| 1178 | + hws_pga = s->vgpu->hws_pga[s->engine->id]; |
---|
| 1179 | + gma = hws_pga + gma; |
---|
| 1180 | + patch_value(s, cmd_ptr(s, 2), gma); |
---|
| 1181 | + val = cmd_val(s, 1) & (~(1 << 21)); |
---|
| 1182 | + patch_value(s, cmd_ptr(s, 1), val); |
---|
| 1183 | + } |
---|
1106 | 1184 | } |
---|
1107 | 1185 | } |
---|
1108 | 1186 | } |
---|
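
The added branch handles PIPE_CONTROL post-sync writes that use "store data index" mode (bit 21 of DWord 1): the guest supplies an offset into the hardware status page rather than an absolute address, so after the address audit the parser rebases it onto the vGPU's own status page (hws_pga) and clears the index bit, turning it into a plain GGTT write. The same fixup is applied to MI_FLUSH_DW further down. A compact sketch of the rule, assuming the raw DWord can be used directly as the offset (the `example_*` helper is illustrative, not from the patch):

```c
/* Sketch of the index-mode fixup: rewrite an HWSP-relative post-sync
 * write into an absolute GGTT write inside this vGPU's status page,
 * then clear the index-mode bit so the patched command stands alone. */
static void example_rebase_hwsp_write(struct parser_exec_state *s,
				      int addr_dw, int ctl_dw, int index_bit)
{
	u32 gma = s->vgpu->hws_pga[s->engine->id] + cmd_val(s, addr_dw);

	patch_value(s, cmd_ptr(s, addr_dw), gma);
	patch_value(s, cmd_ptr(s, ctl_dw),
		    cmd_val(s, ctl_dw) & ~(1U << index_bit));
}
```
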
.. | .. |
---|
1111 | 1189 | return ret; |
---|
1112 | 1190 | |
---|
1113 | 1191 | if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY) |
---|
1114 | | - set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify, |
---|
1115 | | - s->workload->pending_events); |
---|
| 1192 | + set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify, |
---|
| 1193 | + s->workload->pending_events); |
---|
1116 | 1194 | return 0; |
---|
1117 | 1195 | } |
---|
1118 | 1196 | |
---|
1119 | 1197 | static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) |
---|
1120 | 1198 | { |
---|
1121 | | - set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, |
---|
1122 | | - s->workload->pending_events); |
---|
| 1199 | + set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt, |
---|
| 1200 | + s->workload->pending_events); |
---|
1123 | 1201 | patch_value(s, cmd_ptr(s, 0), MI_NOOP); |
---|
1124 | 1202 | return 0; |
---|
1125 | 1203 | } |
---|
.. | .. |
---|
1169 | 1247 | static int gen8_decode_mi_display_flip(struct parser_exec_state *s, |
---|
1170 | 1248 | struct mi_display_flip_command_info *info) |
---|
1171 | 1249 | { |
---|
1172 | | - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
---|
| 1250 | + struct drm_i915_private *dev_priv = s->engine->i915; |
---|
1173 | 1251 | struct plane_code_mapping gen8_plane_code[] = { |
---|
1174 | 1252 | [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE}, |
---|
1175 | 1253 | [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE}, |
---|
.. | .. |
---|
1186 | 1264 | dword2 = cmd_val(s, 2); |
---|
1187 | 1265 | |
---|
1188 | 1266 | v = (dword0 & GENMASK(21, 19)) >> 19; |
---|
1189 | | - if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code))) |
---|
| 1267 | + if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code))) |
---|
1190 | 1268 | return -EBADRQC; |
---|
1191 | 1269 | |
---|
1192 | 1270 | info->pipe = gen8_plane_code[v].pipe; |
---|
.. | .. |
---|
1206 | 1284 | info->stride_reg = SPRSTRIDE(info->pipe); |
---|
1207 | 1285 | info->surf_reg = SPRSURF(info->pipe); |
---|
1208 | 1286 | } else { |
---|
1209 | | - WARN_ON(1); |
---|
| 1287 | + drm_WARN_ON(&dev_priv->drm, 1); |
---|
1210 | 1288 | return -EBADRQC; |
---|
1211 | 1289 | } |
---|
1212 | 1290 | return 0; |
---|
.. | .. |
---|
1215 | 1293 | static int skl_decode_mi_display_flip(struct parser_exec_state *s, |
---|
1216 | 1294 | struct mi_display_flip_command_info *info) |
---|
1217 | 1295 | { |
---|
1218 | | - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
---|
| 1296 | + struct drm_i915_private *dev_priv = s->engine->i915; |
---|
1219 | 1297 | struct intel_vgpu *vgpu = s->vgpu; |
---|
1220 | 1298 | u32 dword0 = cmd_val(s, 0); |
---|
1221 | 1299 | u32 dword1 = cmd_val(s, 1); |
---|
.. | .. |
---|
1274 | 1352 | static int gen8_check_mi_display_flip(struct parser_exec_state *s, |
---|
1275 | 1353 | struct mi_display_flip_command_info *info) |
---|
1276 | 1354 | { |
---|
1277 | | - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
---|
1278 | 1355 | u32 stride, tile; |
---|
1279 | 1356 | |
---|
1280 | 1357 | if (!info->async_flip) |
---|
1281 | 1358 | return 0; |
---|
1282 | 1359 | |
---|
1283 | | - if (IS_SKYLAKE(dev_priv) |
---|
1284 | | - || IS_KABYLAKE(dev_priv) |
---|
1285 | | - || IS_BROXTON(dev_priv)) { |
---|
| 1360 | + if (INTEL_GEN(s->engine->i915) >= 9) { |
---|
1286 | 1361 | stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); |
---|
1287 | 1362 | tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & |
---|
1288 | 1363 | GENMASK(12, 10)) >> 10; |
---|
.. | .. |
---|
1305 | 1380 | struct parser_exec_state *s, |
---|
1306 | 1381 | struct mi_display_flip_command_info *info) |
---|
1307 | 1382 | { |
---|
1308 | | - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
---|
| 1383 | + struct drm_i915_private *dev_priv = s->engine->i915; |
---|
1309 | 1384 | struct intel_vgpu *vgpu = s->vgpu; |
---|
1310 | 1385 | |
---|
1311 | 1386 | set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), |
---|
1312 | 1387 | info->surf_val << 12); |
---|
1313 | | - if (IS_SKYLAKE(dev_priv) |
---|
1314 | | - || IS_KABYLAKE(dev_priv) |
---|
1315 | | - || IS_BROXTON(dev_priv)) { |
---|
| 1388 | + if (INTEL_GEN(dev_priv) >= 9) { |
---|
1316 | 1389 | set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0), |
---|
1317 | 1390 | info->stride_val); |
---|
1318 | 1391 | set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10), |
---|
.. | .. |
---|
1324 | 1397 | info->tile_val << 10); |
---|
1325 | 1398 | } |
---|
1326 | 1399 | |
---|
1327 | | - vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++; |
---|
1328 | | - intel_vgpu_trigger_virtual_event(vgpu, info->event); |
---|
| 1400 | + if (info->plane == PLANE_PRIMARY) |
---|
| 1401 | + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++; |
---|
| 1402 | + |
---|
| 1403 | + if (info->async_flip) |
---|
| 1404 | + intel_vgpu_trigger_virtual_event(vgpu, info->event); |
---|
| 1405 | + else |
---|
| 1406 | + set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]); |
---|
| 1407 | + |
---|
1329 | 1408 | return 0; |
---|
1330 | 1409 | } |
---|
1331 | 1410 | |
---|
1332 | 1411 | static int decode_mi_display_flip(struct parser_exec_state *s, |
---|
1333 | 1412 | struct mi_display_flip_command_info *info) |
---|
1334 | 1413 | { |
---|
1335 | | - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
---|
1336 | | - |
---|
1337 | | - if (IS_BROADWELL(dev_priv)) |
---|
| 1414 | + if (IS_BROADWELL(s->engine->i915)) |
---|
1338 | 1415 | return gen8_decode_mi_display_flip(s, info); |
---|
1339 | | - if (IS_SKYLAKE(dev_priv) |
---|
1340 | | - || IS_KABYLAKE(dev_priv) |
---|
1341 | | - || IS_BROXTON(dev_priv)) |
---|
| 1416 | + if (INTEL_GEN(s->engine->i915) >= 9) |
---|
1342 | 1417 | return skl_decode_mi_display_flip(s, info); |
---|
1343 | 1418 | |
---|
1344 | 1419 | return -ENODEV; |
---|
.. | .. |
---|
1364 | 1439 | int ret; |
---|
1365 | 1440 | int i; |
---|
1366 | 1441 | int len = cmd_length(s); |
---|
| 1442 | + u32 valid_len = CMD_LEN(1); |
---|
| 1443 | + |
---|
| 1444 | + /* Flip Type == Stereo 3D Flip */ |
---|
| 1445 | + if (DWORD_FIELD(2, 1, 0) == 2) |
---|
| 1446 | + valid_len++; |
---|
| 1447 | + ret = gvt_check_valid_cmd_length(cmd_length(s), |
---|
| 1448 | + valid_len); |
---|
| 1449 | + if (ret) |
---|
| 1450 | + return ret; |
---|
1367 | 1451 | |
---|
1368 | 1452 | ret = decode_mi_display_flip(s, &info); |
---|
1369 | 1453 | if (ret) { |
---|
.. | .. |
---|
1483 | 1567 | int op_size = (cmd_length(s) - 3) * sizeof(u32); |
---|
1484 | 1568 | int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0; |
---|
1485 | 1569 | unsigned long gma, gma_low, gma_high; |
---|
| 1570 | + u32 valid_len = CMD_LEN(2); |
---|
1486 | 1571 | int ret = 0; |
---|
1487 | 1572 | |
---|
1488 | 1573 | /* check ppggt */ |
---|
1489 | 1574 | if (!(cmd_val(s, 0) & (1 << 22))) |
---|
1490 | 1575 | return 0; |
---|
| 1576 | + |
---|
| 1577 | + /* check if QWORD */ |
---|
| 1578 | + if (DWORD_FIELD(0, 21, 21)) |
---|
| 1579 | + valid_len++; |
---|
| 1580 | + ret = gvt_check_valid_cmd_length(cmd_length(s), |
---|
| 1581 | + valid_len); |
---|
| 1582 | + if (ret) |
---|
| 1583 | + return ret; |
---|
1491 | 1584 | |
---|
1492 | 1585 | gma = cmd_val(s, 2) & GENMASK(31, 2); |
---|
1493 | 1586 | |
---|
.. | .. |
---|
1531 | 1624 | int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * |
---|
1532 | 1625 | sizeof(u32); |
---|
1533 | 1626 | unsigned long gma, gma_high; |
---|
| 1627 | + u32 valid_len = CMD_LEN(1); |
---|
1534 | 1628 | int ret = 0; |
---|
1535 | 1629 | |
---|
1536 | 1630 | if (!(cmd_val(s, 0) & (1 << 22))) |
---|
| 1631 | + return ret; |
---|
| 1632 | + |
---|
| 1633 | + /* check inline data */ |
---|
| 1634 | + if (cmd_val(s, 0) & BIT(18)) |
---|
| 1635 | + valid_len = CMD_LEN(9); |
---|
| 1636 | + ret = gvt_check_valid_cmd_length(cmd_length(s), |
---|
| 1637 | + valid_len); |
---|
| 1638 | + if (ret) |
---|
1537 | 1639 | return ret; |
---|
1538 | 1640 | |
---|
1539 | 1641 | gma = cmd_val(s, 1) & GENMASK(31, 2); |
---|
.. | .. |
---|
1572 | 1674 | unsigned long gma; |
---|
1573 | 1675 | bool index_mode = false; |
---|
1574 | 1676 | int ret = 0; |
---|
| 1677 | + u32 hws_pga, val; |
---|
| 1678 | + u32 valid_len = CMD_LEN(2); |
---|
| 1679 | + |
---|
| 1680 | + ret = gvt_check_valid_cmd_length(cmd_length(s), |
---|
| 1681 | + valid_len); |
---|
| 1682 | + if (ret) { |
---|
| 1683 | + /* Check again for Qword */ |
---|
| 1684 | + ret = gvt_check_valid_cmd_length(cmd_length(s), |
---|
| 1685 | + ++valid_len); |
---|
| 1686 | + return ret; |
---|
| 1687 | + } |
---|
1575 | 1688 | |
---|
1576 | 1689 | /* Check post-sync and ppgtt bit */ |
---|
1577 | 1690 | if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) { |
---|
.. | .. |
---|
1582 | 1695 | if (cmd_val(s, 0) & (1 << 21)) |
---|
1583 | 1696 | index_mode = true; |
---|
1584 | 1697 | ret = cmd_address_audit(s, gma, sizeof(u64), index_mode); |
---|
| 1698 | + if (ret) |
---|
| 1699 | + return ret; |
---|
| 1700 | + if (index_mode) { |
---|
| 1701 | + hws_pga = s->vgpu->hws_pga[s->engine->id]; |
---|
| 1702 | + gma = hws_pga + gma; |
---|
| 1703 | + patch_value(s, cmd_ptr(s, 1), gma); |
---|
| 1704 | + val = cmd_val(s, 0) & (~(1 << 21)); |
---|
| 1705 | + patch_value(s, cmd_ptr(s, 0), val); |
---|
| 1706 | + } |
---|
1585 | 1707 | } |
---|
1586 | 1708 | /* Check notify bit */ |
---|
1587 | 1709 | if ((cmd_val(s, 0) & (1 << 8))) |
---|
1588 | | - set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw, |
---|
1589 | | - s->workload->pending_events); |
---|
| 1710 | + set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw, |
---|
| 1711 | + s->workload->pending_events); |
---|
1590 | 1712 | return ret; |
---|
1591 | 1713 | } |
---|
1592 | 1714 | |
---|
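
cmd_handler_mi_flush_dw() gets the same two treatments: a length check up front (MI_FLUSH_DW is 4 DWords with a DWord post-sync payload and 5 with a QWord one, so a failed check against CMD_LEN(2) is retried with one more DWord before giving up) and the HWSP index-mode rebase when bit 21 of DWord 0 is set. The acceptance rule reads as the sketch below (illustrative helper, not part of the patch):

```c
/* Sketch: the two sizes cmd_handler_mi_flush_dw() accepts. */
static bool example_flush_dw_len_ok(struct parser_exec_state *s)
{
	u32 len = cmd_length(s);

	return len == CMD_LEN(2) ||	/* DWord post-sync data: 4 DWords */
	       len == CMD_LEN(2) + 1;	/* QWord post-sync data: 5 DWords */
}
```
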
.. | .. |
---|
1634 | 1756 | static int batch_buffer_needs_scan(struct parser_exec_state *s) |
---|
1635 | 1757 | { |
---|
1636 | 1758 | /* Decide privilege based on address space */ |
---|
1637 | | - if (cmd_val(s, 0) & (1 << 8) && |
---|
1638 | | - !(s->vgpu->scan_nonprivbb & (1 << s->ring_id))) |
---|
| 1759 | + if (cmd_val(s, 0) & BIT(8) && |
---|
| 1760 | + !(s->vgpu->scan_nonprivbb & s->engine->mask)) |
---|
1639 | 1761 | return 0; |
---|
| 1762 | + |
---|
1640 | 1763 | return 1; |
---|
1641 | 1764 | } |
---|
1642 | 1765 | |
---|
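
batch_buffer_needs_scan() above encodes the privilege decision: bit 8 of the MI_BATCH_BUFFER_START header is the address-space indicator (set means the batch runs from PPGTT, i.e. non-privileged), and such batches are skipped unless the engine's bit is set in the vGPU's scan_nonprivbb debug mask, which now uses the same encoding as intel_engine_cs->mask. Folded into a single predicate (illustrative helper only), the rule is:

```c
/* Sketch of the decision above: scan privileged batches always,
 * non-privileged (PPGTT) batches only when debugging asks for it. */
static bool example_needs_scan(struct parser_exec_state *s)
{
	bool nonpriv_ppgtt = cmd_val(s, 0) & BIT(8);	/* PPGTT address space */

	return !nonpriv_ppgtt ||
	       (s->vgpu->scan_nonprivbb & s->engine->mask);
}
```
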
1643 | | -static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) |
---|
| 1766 | +static const char *repr_addr_type(unsigned int type) |
---|
| 1767 | +{ |
---|
| 1768 | + return type == PPGTT_BUFFER ? "ppgtt" : "ggtt"; |
---|
| 1769 | +} |
---|
| 1770 | + |
---|
| 1771 | +static int find_bb_size(struct parser_exec_state *s, |
---|
| 1772 | + unsigned long *bb_size, |
---|
| 1773 | + unsigned long *bb_end_cmd_offset) |
---|
1644 | 1774 | { |
---|
1645 | 1775 | unsigned long gma = 0; |
---|
1646 | | - struct cmd_info *info; |
---|
1647 | | - uint32_t cmd_len = 0; |
---|
| 1776 | + const struct cmd_info *info; |
---|
| 1777 | + u32 cmd_len = 0; |
---|
1648 | 1778 | bool bb_end = false; |
---|
1649 | 1779 | struct intel_vgpu *vgpu = s->vgpu; |
---|
1650 | 1780 | u32 cmd; |
---|
.. | .. |
---|
1652 | 1782 | s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; |
---|
1653 | 1783 | |
---|
1654 | 1784 | *bb_size = 0; |
---|
| 1785 | + *bb_end_cmd_offset = 0; |
---|
1655 | 1786 | |
---|
1656 | 1787 | /* get the start gm address of the batch buffer */ |
---|
1657 | 1788 | gma = get_gma_bb_from_cmd(s, 1); |
---|
.. | .. |
---|
1659 | 1790 | return -EFAULT; |
---|
1660 | 1791 | |
---|
1661 | 1792 | cmd = cmd_val(s, 0); |
---|
1662 | | - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); |
---|
| 1793 | + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); |
---|
1663 | 1794 | if (info == NULL) { |
---|
1664 | | - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", |
---|
1665 | | - cmd, get_opcode(cmd, s->ring_id), |
---|
1666 | | - (s->buf_addr_type == PPGTT_BUFFER) ? |
---|
1667 | | - "ppgtt" : "ggtt", s->ring_id, s->workload); |
---|
| 1795 | + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", |
---|
| 1796 | + cmd, get_opcode(cmd, s->engine), |
---|
| 1797 | + repr_addr_type(s->buf_addr_type), |
---|
| 1798 | + s->engine->name, s->workload); |
---|
1668 | 1799 | return -EBADRQC; |
---|
1669 | 1800 | } |
---|
1670 | 1801 | do { |
---|
1671 | 1802 | if (copy_gma_to_hva(s->vgpu, mm, |
---|
1672 | | - gma, gma + 4, &cmd) < 0) |
---|
| 1803 | + gma, gma + 4, &cmd) < 0) |
---|
1673 | 1804 | return -EFAULT; |
---|
1674 | | - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); |
---|
| 1805 | + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); |
---|
1675 | 1806 | if (info == NULL) { |
---|
1676 | | - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", |
---|
1677 | | - cmd, get_opcode(cmd, s->ring_id), |
---|
1678 | | - (s->buf_addr_type == PPGTT_BUFFER) ? |
---|
1679 | | - "ppgtt" : "ggtt", s->ring_id, s->workload); |
---|
| 1807 | + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", |
---|
| 1808 | + cmd, get_opcode(cmd, s->engine), |
---|
| 1809 | + repr_addr_type(s->buf_addr_type), |
---|
| 1810 | + s->engine->name, s->workload); |
---|
1680 | 1811 | return -EBADRQC; |
---|
1681 | 1812 | } |
---|
1682 | 1813 | |
---|
.. | .. |
---|
1687 | 1818 | /* chained batch buffer */ |
---|
1688 | 1819 | bb_end = true; |
---|
1689 | 1820 | } |
---|
| 1821 | + |
---|
| 1822 | + if (bb_end) |
---|
| 1823 | + *bb_end_cmd_offset = *bb_size; |
---|
| 1824 | + |
---|
1690 | 1825 | cmd_len = get_cmd_length(info, cmd) << 2; |
---|
1691 | 1826 | *bb_size += cmd_len; |
---|
1692 | 1827 | gma += cmd_len; |
---|
.. | .. |
---|
1695 | 1830 | return 0; |
---|
1696 | 1831 | } |
---|
1697 | 1832 | |
---|
| 1833 | +static int audit_bb_end(struct parser_exec_state *s, void *va) |
---|
| 1834 | +{ |
---|
| 1835 | + struct intel_vgpu *vgpu = s->vgpu; |
---|
| 1836 | + u32 cmd = *(u32 *)va; |
---|
| 1837 | + const struct cmd_info *info; |
---|
| 1838 | + |
---|
| 1839 | + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); |
---|
| 1840 | + if (info == NULL) { |
---|
| 1841 | + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", |
---|
| 1842 | + cmd, get_opcode(cmd, s->engine), |
---|
| 1843 | + repr_addr_type(s->buf_addr_type), |
---|
| 1844 | + s->engine->name, s->workload); |
---|
| 1845 | + return -EBADRQC; |
---|
| 1846 | + } |
---|
| 1847 | + |
---|
| 1848 | + if ((info->opcode == OP_MI_BATCH_BUFFER_END) || |
---|
| 1849 | + ((info->opcode == OP_MI_BATCH_BUFFER_START) && |
---|
| 1850 | + (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0))) |
---|
| 1851 | + return 0; |
---|
| 1852 | + |
---|
| 1853 | + return -EBADRQC; |
---|
| 1854 | +} |
---|
| 1855 | + |
---|
1698 | 1856 | static int perform_bb_shadow(struct parser_exec_state *s) |
---|
1699 | 1857 | { |
---|
1700 | 1858 | struct intel_vgpu *vgpu = s->vgpu; |
---|
1701 | 1859 | struct intel_vgpu_shadow_bb *bb; |
---|
1702 | 1860 | unsigned long gma = 0; |
---|
1703 | 1861 | unsigned long bb_size; |
---|
| 1862 | + unsigned long bb_end_cmd_offset; |
---|
1704 | 1863 | int ret = 0; |
---|
1705 | 1864 | struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? |
---|
1706 | 1865 | s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; |
---|
1707 | | - unsigned long gma_start_offset = 0; |
---|
| 1866 | + unsigned long start_offset = 0; |
---|
1708 | 1867 | |
---|
1709 | 1868 | /* get the start gm address of the batch buffer */ |
---|
1710 | 1869 | gma = get_gma_bb_from_cmd(s, 1); |
---|
1711 | 1870 | if (gma == INTEL_GVT_INVALID_ADDR) |
---|
1712 | 1871 | return -EFAULT; |
---|
1713 | 1872 | |
---|
1714 | | - ret = find_bb_size(s, &bb_size); |
---|
| 1873 | + ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); |
---|
1715 | 1874 | if (ret) |
---|
1716 | 1875 | return ret; |
---|
1717 | 1876 | |
---|
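
Together, the find_bb_size()/audit_bb_end() changes close a time-of-check/time-of-use hole: find_bb_size() now records the offset of the command that terminated the sizing walk, and once the guest batch has been copied into the shadow object, audit_bb_end() re-decodes the command at that offset in the copy and insists it still ends the buffer, i.e. it is MI_BATCH_BUFFER_END or a chained, first-level MI_BATCH_BUFFER_START. All further scanning then runs on the shadow copy, so a guest rewriting the batch after it was sized gains nothing. The acceptance rule, as an illustrative sketch:

```c
/* Sketch of the rule audit_bb_end() enforces on the command found at
 * the recorded end offset of the shadowed batch. */
static bool example_ends_batch(const struct cmd_info *info, u32 cmd)
{
	return info->opcode == OP_MI_BATCH_BUFFER_END ||
	       (info->opcode == OP_MI_BATCH_BUFFER_START &&
		BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0);
}
```
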
.. | .. |
---|
1721 | 1880 | |
---|
1722 | 1881 | bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; |
---|
1723 | 1882 | |
---|
1724 | | - /* the gma_start_offset stores the batch buffer's start gma's |
---|
| 1883 | + /* the start_offset stores the batch buffer's start gma's |
---|
1725 | 1884 | * offset relative to page boundary. so for non-privileged batch |
---|
1726 | 1885 | * buffer, the shadowed gem object holds exactly the same page |
---|
1727 | 1886 | * layout as original gem object. This is for the convience of |
---|
.. | .. |
---|
1733 | 1892 | * that of shadowed page. |
---|
1734 | 1893 | */ |
---|
1735 | 1894 | if (bb->ppgtt) |
---|
1736 | | - gma_start_offset = gma & ~I915_GTT_PAGE_MASK; |
---|
| 1895 | + start_offset = gma & ~I915_GTT_PAGE_MASK; |
---|
1737 | 1896 | |
---|
1738 | | - bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv, |
---|
1739 | | - roundup(bb_size + gma_start_offset, PAGE_SIZE)); |
---|
| 1897 | + bb->obj = i915_gem_object_create_shmem(s->engine->i915, |
---|
| 1898 | + round_up(bb_size + start_offset, |
---|
| 1899 | + PAGE_SIZE)); |
---|
1740 | 1900 | if (IS_ERR(bb->obj)) { |
---|
1741 | 1901 | ret = PTR_ERR(bb->obj); |
---|
1742 | 1902 | goto err_free_bb; |
---|
1743 | 1903 | } |
---|
1744 | 1904 | |
---|
1745 | | - ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush); |
---|
1746 | | - if (ret) |
---|
1747 | | - goto err_free_obj; |
---|
1748 | | - |
---|
1749 | 1905 | bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB); |
---|
1750 | 1906 | if (IS_ERR(bb->va)) { |
---|
1751 | 1907 | ret = PTR_ERR(bb->va); |
---|
1752 | | - goto err_finish_shmem_access; |
---|
1753 | | - } |
---|
1754 | | - |
---|
1755 | | - if (bb->clflush & CLFLUSH_BEFORE) { |
---|
1756 | | - drm_clflush_virt_range(bb->va, bb->obj->base.size); |
---|
1757 | | - bb->clflush &= ~CLFLUSH_BEFORE; |
---|
| 1908 | + goto err_free_obj; |
---|
1758 | 1909 | } |
---|
1759 | 1910 | |
---|
1760 | 1911 | ret = copy_gma_to_hva(s->vgpu, mm, |
---|
1761 | 1912 | gma, gma + bb_size, |
---|
1762 | | - bb->va + gma_start_offset); |
---|
| 1913 | + bb->va + start_offset); |
---|
1763 | 1914 | if (ret < 0) { |
---|
1764 | 1915 | gvt_vgpu_err("fail to copy guest ring buffer\n"); |
---|
1765 | 1916 | ret = -EFAULT; |
---|
1766 | 1917 | goto err_unmap; |
---|
1767 | 1918 | } |
---|
1768 | 1919 | |
---|
| 1920 | + ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); |
---|
| 1921 | + if (ret) |
---|
| 1922 | + goto err_unmap; |
---|
| 1923 | + |
---|
| 1924 | + i915_gem_object_unlock(bb->obj); |
---|
1769 | 1925 | INIT_LIST_HEAD(&bb->list); |
---|
1770 | 1926 | list_add(&bb->list, &s->workload->shadow_bb); |
---|
1771 | 1927 | |
---|
1772 | | - bb->accessing = true; |
---|
1773 | 1928 | bb->bb_start_cmd_va = s->ip_va; |
---|
1774 | 1929 | |
---|
1775 | 1930 | if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) |
---|
.. | .. |
---|
1785 | 1940 | * buffer's gma in pair. After all, we don't want to pin the shadow |
---|
1786 | 1941 | * buffer here (too early). |
---|
1787 | 1942 | */ |
---|
1788 | | - s->ip_va = bb->va + gma_start_offset; |
---|
| 1943 | + s->ip_va = bb->va + start_offset; |
---|
1789 | 1944 | s->ip_gma = gma; |
---|
1790 | 1945 | return 0; |
---|
1791 | 1946 | err_unmap: |
---|
1792 | 1947 | i915_gem_object_unpin_map(bb->obj); |
---|
1793 | | -err_finish_shmem_access: |
---|
1794 | | - i915_gem_obj_finish_shmem_access(bb->obj); |
---|
1795 | 1948 | err_free_obj: |
---|
1796 | 1949 | i915_gem_object_put(bb->obj); |
---|
1797 | 1950 | err_free_bb: |
---|
.. | .. |
---|
1840 | 1993 | return ret; |
---|
1841 | 1994 | } |
---|
1842 | 1995 | |
---|
1843 | | -static struct cmd_info cmd_info[] = { |
---|
| 1996 | +static int mi_noop_index; |
---|
| 1997 | + |
---|
| 1998 | +static const struct cmd_info cmd_info[] = { |
---|
1844 | 1999 | {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, |
---|
1845 | 2000 | |
---|
1846 | 2001 | {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL, |
---|
.. | .. |
---|
1888 | 2043 | {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1, |
---|
1889 | 2044 | NULL}, |
---|
1890 | 2045 | |
---|
1891 | | - {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE, |
---|
| 2046 | + {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR, |
---|
1892 | 2047 | R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip}, |
---|
1893 | 2048 | |
---|
1894 | | - {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL, |
---|
1895 | | - 0, 8, NULL}, |
---|
| 2049 | + {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED, |
---|
| 2050 | + R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)}, |
---|
1896 | 2051 | |
---|
1897 | 2052 | {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL}, |
---|
1898 | 2053 | |
---|
1899 | | - {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, |
---|
| 2054 | + {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, |
---|
| 2055 | + D_ALL, 0, 8, NULL, CMD_LEN(0)}, |
---|
1900 | 2056 | |
---|
1901 | | - {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL, |
---|
1902 | | - D_BDW_PLUS, 0, 8, NULL}, |
---|
| 2057 | + {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, |
---|
| 2058 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8, |
---|
| 2059 | + NULL, CMD_LEN(0)}, |
---|
1903 | 2060 | |
---|
1904 | | - {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS, |
---|
1905 | | - ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait}, |
---|
| 2061 | + {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, |
---|
| 2062 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2), |
---|
| 2063 | + 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)}, |
---|
1906 | 2064 | |
---|
1907 | 2065 | {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS, |
---|
1908 | 2066 | ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm}, |
---|
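
The table rewrites above mostly attach a valid_len to entries that gained F_LEN_VAR_FIXED (and fix the misspelled MI_SEMAPHORE_* names). Since the entries are positional, an annotated copy of one of them may help when reading the rest of the table; this simply re-expresses the MI_SEMAPHORE_WAIT line with designated initializers and is not part of the patch:

```c
/* The MI_SEMAPHORE_WAIT entry above, field by field (illustration only). */
static const struct cmd_info example_entry = {
	.name        = "MI_SEMAPHORE_WAIT",
	.opcode      = OP_MI_SEMAPHORE_WAIT,
	.flag        = F_LEN_VAR | F_LEN_VAR_FIXED, /* variable length field, fixed layout */
	.rings       = R_ALL,                       /* legal on every engine */
	.devices     = D_BDW_PLUS,                  /* BDW and newer */
	.addr_bitmap = ADDR_FIX_1(2),               /* DWord 2 holds a GPU address to audit */
	.len         = 8,                           /* length-field bias bits (F_LEN_VAR) */
	.handler     = cmd_handler_mi_semaphore_wait,
	.valid_len   = CMD_LEN(2),                  /* expected total size: 4 DWords */
};
```
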
.. | .. |
---|
1916 | 2074 | {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10, |
---|
1917 | 2075 | cmd_handler_mi_update_gtt}, |
---|
1918 | 2076 | |
---|
1919 | | - {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL, |
---|
1920 | | - D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm}, |
---|
| 2077 | + {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, |
---|
| 2078 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, |
---|
| 2079 | + cmd_handler_srm, CMD_LEN(2)}, |
---|
1921 | 2080 | |
---|
1922 | 2081 | {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6, |
---|
1923 | 2082 | cmd_handler_mi_flush_dw}, |
---|
.. | .. |
---|
1925 | 2084 | {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1), |
---|
1926 | 2085 | 10, cmd_handler_mi_clflush}, |
---|
1927 | 2086 | |
---|
1928 | | - {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL, |
---|
1929 | | - D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count}, |
---|
| 2087 | + {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, |
---|
| 2088 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6, |
---|
| 2089 | + cmd_handler_mi_report_perf_count, CMD_LEN(2)}, |
---|
1930 | 2090 | |
---|
1931 | | - {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL, |
---|
1932 | | - D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm}, |
---|
| 2091 | + {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, |
---|
| 2092 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, |
---|
| 2093 | + cmd_handler_lrm, CMD_LEN(2)}, |
---|
1933 | 2094 | |
---|
1934 | | - {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL, |
---|
1935 | | - D_ALL, 0, 8, cmd_handler_lrr}, |
---|
| 2095 | + {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, |
---|
| 2096 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8, |
---|
| 2097 | + cmd_handler_lrr, CMD_LEN(1)}, |
---|
1936 | 2098 | |
---|
1937 | | - {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS, |
---|
1938 | | - D_ALL, 0, 8, NULL}, |
---|
| 2099 | + {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, |
---|
| 2100 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0, |
---|
| 2101 | + 8, NULL, CMD_LEN(2)}, |
---|
1939 | 2102 | |
---|
1940 | | - {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL, |
---|
1941 | | - ADDR_FIX_1(2), 8, NULL}, |
---|
| 2103 | + {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, |
---|
| 2104 | + R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)}, |
---|
1942 | 2105 | |
---|
1943 | 2106 | {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL, |
---|
1944 | 2107 | ADDR_FIX_1(2), 8, NULL}, |
---|
1945 | 2108 | |
---|
1946 | | - {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2), |
---|
1947 | | - 8, cmd_handler_mi_op_2e}, |
---|
| 2109 | + {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, |
---|
| 2110 | + ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)}, |
---|
1948 | 2111 | |
---|
1949 | 2112 | {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), |
---|
1950 | 2113 | 8, cmd_handler_mi_op_2f}, |
---|
.. | .. |
---|
1954 | 2117 | cmd_handler_mi_batch_buffer_start}, |
---|
1955 | 2118 | |
---|
1956 | 2119 | {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END, |
---|
1957 | | - F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8, |
---|
1958 | | - cmd_handler_mi_conditional_batch_buffer_end}, |
---|
| 2120 | + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, |
---|
| 2121 | + cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)}, |
---|
1959 | 2122 | |
---|
1960 | 2123 | {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST, |
---|
1961 | 2124 | R_RCS | R_BCS, D_ALL, 0, 2, NULL}, |
---|
.. | .. |
---|
2343 | 2506 | {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL, |
---|
2344 | 2507 | ADDR_FIX_1(1), 8, NULL}, |
---|
2345 | 2508 | |
---|
| 2509 | + {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS, |
---|
| 2510 | + F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL}, |
---|
| 2511 | + |
---|
2346 | 2512 | {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, |
---|
2347 | 2513 | |
---|
2348 | 2514 | {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, |
---|
.. | .. |
---|
2507 | 2673 | 0, 12, NULL}, |
---|
2508 | 2674 | |
---|
2509 | 2675 | {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS, |
---|
2510 | | - 0, 20, NULL}, |
---|
| 2676 | + 0, 12, NULL}, |
---|
2511 | 2677 | }; |
---|
2512 | 2678 | |
---|
2513 | 2679 | static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) |
---|
.. | .. |
---|
2519 | 2685 | static int cmd_parser_exec(struct parser_exec_state *s) |
---|
2520 | 2686 | { |
---|
2521 | 2687 | struct intel_vgpu *vgpu = s->vgpu; |
---|
2522 | | - struct cmd_info *info; |
---|
| 2688 | + const struct cmd_info *info; |
---|
2523 | 2689 | u32 cmd; |
---|
2524 | 2690 | int ret = 0; |
---|
2525 | 2691 | |
---|
2526 | 2692 | cmd = cmd_val(s, 0); |
---|
2527 | 2693 | |
---|
2528 | | - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); |
---|
| 2694 | + /* fastpath for MI_NOOP */ |
---|
| 2695 | + if (cmd == MI_NOOP) |
---|
| 2696 | + info = &cmd_info[mi_noop_index]; |
---|
| 2697 | + else |
---|
| 2698 | + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); |
---|
| 2699 | + |
---|
2529 | 2700 | if (info == NULL) { |
---|
2530 | | - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", |
---|
2531 | | - cmd, get_opcode(cmd, s->ring_id), |
---|
2532 | | - (s->buf_addr_type == PPGTT_BUFFER) ? |
---|
2533 | | - "ppgtt" : "ggtt", s->ring_id, s->workload); |
---|
| 2701 | + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", |
---|
| 2702 | + cmd, get_opcode(cmd, s->engine), |
---|
| 2703 | + repr_addr_type(s->buf_addr_type), |
---|
| 2704 | + s->engine->name, s->workload); |
---|
2534 | 2705 | return -EBADRQC; |
---|
2535 | 2706 | } |
---|
2536 | 2707 | |
---|
2537 | 2708 | s->info = info; |
---|
2538 | 2709 | |
---|
2539 | | - trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va, |
---|
| 2710 | + trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, |
---|
2540 | 2711 | cmd_length(s), s->buf_type, s->buf_addr_type, |
---|
2541 | 2712 | s->workload, info->name); |
---|
| 2713 | + |
---|
| 2714 | + if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) { |
---|
| 2715 | + ret = gvt_check_valid_cmd_length(cmd_length(s), |
---|
| 2716 | + info->valid_len); |
---|
| 2717 | + if (ret) |
---|
| 2718 | + return ret; |
---|
| 2719 | + } |
---|
2542 | 2720 | |
---|
2543 | 2721 | if (info->handler) { |
---|
2544 | 2722 | ret = info->handler(s); |
---|
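
Two things change in the dispatch loop: MI_NOOP, which pads most ring and batch contents, no longer goes through the opcode hash (its cmd_info slot is remembered in mi_noop_index by init_cmd_table() later in this patch and picked directly), and commands flagged F_LEN_VAR_FIXED have their length validated here centrally before any handler runs. The fast path is safe because an MI_NOOP header is the all-zero DWord, so a raw compare identifies it; a trivial, illustrative sketch:

```c
/* Sketch: why the fast path needs no decode step. MI_NOOP is the
 * all-zero MI instruction, so matching the raw DWord is sufficient. */
static bool example_is_mi_noop(u32 cmd)
{
	return cmd == MI_NOOP;	/* MI_NOOP == 0x00000000 */
}
```
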
.. | .. |
---|
2632 | 2810 | s.buf_type = RING_BUFFER_INSTRUCTION; |
---|
2633 | 2811 | s.buf_addr_type = GTT_BUFFER; |
---|
2634 | 2812 | s.vgpu = workload->vgpu; |
---|
2635 | | - s.ring_id = workload->ring_id; |
---|
| 2813 | + s.engine = workload->engine; |
---|
2636 | 2814 | s.ring_start = workload->rb_start; |
---|
2637 | 2815 | s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); |
---|
2638 | 2816 | s.ring_head = gma_head; |
---|
.. | .. |
---|
2641 | 2819 | s.workload = workload; |
---|
2642 | 2820 | s.is_ctx_wa = false; |
---|
2643 | 2821 | |
---|
2644 | | - if ((bypass_scan_mask & (1 << workload->ring_id)) || |
---|
2645 | | - gma_head == gma_tail) |
---|
| 2822 | + if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail) |
---|
2646 | 2823 | return 0; |
---|
2647 | | - |
---|
2648 | | - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { |
---|
2649 | | - ret = -EINVAL; |
---|
2650 | | - goto out; |
---|
2651 | | - } |
---|
2652 | 2824 | |
---|
2653 | 2825 | ret = ip_gma_set(&s, gma_head); |
---|
2654 | 2826 | if (ret) |
---|
.. | .. |
---|
2676 | 2848 | I915_GTT_PAGE_SIZE))) |
---|
2677 | 2849 | return -EINVAL; |
---|
2678 | 2850 | |
---|
2679 | | - ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t); |
---|
| 2851 | + ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32); |
---|
2680 | 2852 | ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES, |
---|
2681 | 2853 | PAGE_SIZE); |
---|
2682 | 2854 | gma_head = wa_ctx->indirect_ctx.guest_gma; |
---|
.. | .. |
---|
2686 | 2858 | s.buf_type = RING_BUFFER_INSTRUCTION; |
---|
2687 | 2859 | s.buf_addr_type = GTT_BUFFER; |
---|
2688 | 2860 | s.vgpu = workload->vgpu; |
---|
2689 | | - s.ring_id = workload->ring_id; |
---|
| 2861 | + s.engine = workload->engine; |
---|
2690 | 2862 | s.ring_start = wa_ctx->indirect_ctx.guest_gma; |
---|
2691 | 2863 | s.ring_size = ring_size; |
---|
2692 | 2864 | s.ring_head = gma_head; |
---|
.. | .. |
---|
2694 | 2866 | s.rb_va = wa_ctx->indirect_ctx.shadow_va; |
---|
2695 | 2867 | s.workload = workload; |
---|
2696 | 2868 | s.is_ctx_wa = true; |
---|
2697 | | - |
---|
2698 | | - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { |
---|
2699 | | - ret = -EINVAL; |
---|
2700 | | - goto out; |
---|
2701 | | - } |
---|
2702 | 2869 | |
---|
2703 | 2870 | ret = ip_gma_set(&s, gma_head); |
---|
2704 | 2871 | if (ret) |
---|
.. | .. |
---|
2716 | 2883 | struct intel_vgpu_submission *s = &vgpu->submission; |
---|
2717 | 2884 | unsigned long gma_head, gma_tail, gma_top, guest_rb_size; |
---|
2718 | 2885 | void *shadow_ring_buffer_va; |
---|
2719 | | - int ring_id = workload->ring_id; |
---|
2720 | 2886 | int ret; |
---|
2721 | 2887 | |
---|
2722 | 2888 | guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); |
---|
.. | .. |
---|
2729 | 2895 | gma_tail = workload->rb_start + workload->rb_tail; |
---|
2730 | 2896 | gma_top = workload->rb_start + guest_rb_size; |
---|
2731 | 2897 | |
---|
2732 | | - if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) { |
---|
| 2898 | + if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) { |
---|
2733 | 2899 | void *p; |
---|
2734 | 2900 | |
---|
2735 | 2901 | /* realloc the new ring buffer if needed */ |
---|
2736 | | - p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len, |
---|
2737 | | - GFP_KERNEL); |
---|
| 2902 | + p = krealloc(s->ring_scan_buffer[workload->engine->id], |
---|
| 2903 | + workload->rb_len, GFP_KERNEL); |
---|
2738 | 2904 | if (!p) { |
---|
2739 | 2905 | gvt_vgpu_err("fail to re-alloc ring scan buffer\n"); |
---|
2740 | 2906 | return -ENOMEM; |
---|
2741 | 2907 | } |
---|
2742 | | - s->ring_scan_buffer[ring_id] = p; |
---|
2743 | | - s->ring_scan_buffer_size[ring_id] = workload->rb_len; |
---|
| 2908 | + s->ring_scan_buffer[workload->engine->id] = p; |
---|
| 2909 | + s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; |
---|
2744 | 2910 | } |
---|
2745 | 2911 | |
---|
2746 | | - shadow_ring_buffer_va = s->ring_scan_buffer[ring_id]; |
---|
| 2912 | + shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; |
---|
2747 | 2913 | |
---|
2748 | 2914 | /* get shadow ring buffer va */ |
---|
2749 | 2915 | workload->shadow_ring_buffer_va = shadow_ring_buffer_va; |
---|
.. | .. |
---|
2801 | 2967 | int ret = 0; |
---|
2802 | 2968 | void *map; |
---|
2803 | 2969 | |
---|
2804 | | - obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv, |
---|
2805 | | - roundup(ctx_size + CACHELINE_BYTES, |
---|
2806 | | - PAGE_SIZE)); |
---|
| 2970 | + obj = i915_gem_object_create_shmem(workload->engine->i915, |
---|
| 2971 | + roundup(ctx_size + CACHELINE_BYTES, |
---|
| 2972 | + PAGE_SIZE)); |
---|
2807 | 2973 | if (IS_ERR(obj)) |
---|
2808 | 2974 | return PTR_ERR(obj); |
---|
2809 | 2975 | |
---|
.. | .. |
---|
2815 | 2981 | goto put_obj; |
---|
2816 | 2982 | } |
---|
2817 | 2983 | |
---|
| 2984 | + i915_gem_object_lock(obj, NULL); |
---|
2818 | 2985 | ret = i915_gem_object_set_to_cpu_domain(obj, false); |
---|
| 2986 | + i915_gem_object_unlock(obj); |
---|
2819 | 2987 | if (ret) { |
---|
2820 | 2988 | gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); |
---|
2821 | 2989 | goto unmap_src; |
---|
.. | .. |
---|
2843 | 3011 | |
---|
2844 | 3012 | static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) |
---|
2845 | 3013 | { |
---|
2846 | | - uint32_t per_ctx_start[CACHELINE_DWORDS] = {0}; |
---|
| 3014 | + u32 per_ctx_start[CACHELINE_DWORDS] = {0}; |
---|
2847 | 3015 | unsigned char *bb_start_sva; |
---|
2848 | 3016 | |
---|
2849 | 3017 | if (!wa_ctx->per_ctx.valid) |
---|
.. | .. |
---|
2888 | 3056 | return 0; |
---|
2889 | 3057 | } |
---|
2890 | 3058 | |
---|
2891 | | -static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, |
---|
2892 | | - unsigned int opcode, unsigned long rings) |
---|
2893 | | -{ |
---|
2894 | | - struct cmd_info *info = NULL; |
---|
2895 | | - unsigned int ring; |
---|
2896 | | - |
---|
2897 | | - for_each_set_bit(ring, &rings, I915_NUM_ENGINES) { |
---|
2898 | | - info = find_cmd_entry(gvt, opcode, ring); |
---|
2899 | | - if (info) |
---|
2900 | | - break; |
---|
2901 | | - } |
---|
2902 | | - return info; |
---|
2903 | | -} |
---|
2904 | | - |
---|
2905 | 3059 | static int init_cmd_table(struct intel_gvt *gvt) |
---|
2906 | 3060 | { |
---|
| 3061 | + unsigned int gen_type = intel_gvt_get_device_type(gvt); |
---|
2907 | 3062 | int i; |
---|
2908 | | - struct cmd_entry *e; |
---|
2909 | | - struct cmd_info *info; |
---|
2910 | | - unsigned int gen_type; |
---|
2911 | | - |
---|
2912 | | - gen_type = intel_gvt_get_device_type(gvt); |
---|
2913 | 3063 | |
---|
2914 | 3064 | for (i = 0; i < ARRAY_SIZE(cmd_info); i++) { |
---|
| 3065 | + struct cmd_entry *e; |
---|
| 3066 | + |
---|
2915 | 3067 | if (!(cmd_info[i].devices & gen_type)) |
---|
2916 | 3068 | continue; |
---|
2917 | 3069 | |
---|
.. | .. |
---|
2920 | 3072 | return -ENOMEM; |
---|
2921 | 3073 | |
---|
2922 | 3074 | e->info = &cmd_info[i]; |
---|
2923 | | - info = find_cmd_entry_any_ring(gvt, |
---|
2924 | | - e->info->opcode, e->info->rings); |
---|
2925 | | - if (info) { |
---|
2926 | | - gvt_err("%s %s duplicated\n", e->info->name, |
---|
2927 | | - info->name); |
---|
2928 | | - kfree(e); |
---|
2929 | | - return -EEXIST; |
---|
2930 | | - } |
---|
| 3075 | + if (cmd_info[i].opcode == OP_MI_NOOP) |
---|
| 3076 | + mi_noop_index = i; |
---|
2931 | 3077 | |
---|
2932 | 3078 | INIT_HLIST_NODE(&e->hlist); |
---|
2933 | 3079 | add_cmd_entry(gvt, e); |
---|
2934 | 3080 | gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n", |
---|
2935 | | - e->info->name, e->info->opcode, e->info->flag, |
---|
2936 | | - e->info->devices, e->info->rings); |
---|
| 3081 | + e->info->name, e->info->opcode, e->info->flag, |
---|
| 3082 | + e->info->devices, e->info->rings); |
---|
2937 | 3083 | } |
---|
| 3084 | + |
---|
2938 | 3085 | return 0; |
---|
2939 | 3086 | } |
---|
2940 | 3087 | |
---|