| .. | .. |
|---|
| 12 | 12 | #include <scsi/scsi.h> |
|---|
| 13 | 13 | #include <linux/android_kabi.h> |
|---|
| 14 | 14 | |
|---|
| 15 | | -struct request_queue; |
|---|
| 16 | 15 | struct block_device; |
|---|
| 17 | 16 | struct completion; |
|---|
| 18 | 17 | struct module; |
|---|
| .. | .. |
|---|
| 23 | 22 | struct Scsi_Host; |
|---|
| 24 | 23 | struct scsi_host_cmd_pool; |
|---|
| 25 | 24 | struct scsi_transport_template; |
|---|
| 26 | | -struct blk_queue_tags; |
|---|
| 27 | 25 | |
|---|
| 28 | 26 | |
|---|
| 29 | | -/* |
|---|
| 30 | | - * The various choices mean: |
|---|
| 31 | | - * NONE: Self evident. Host adapter is not capable of scatter-gather. |
|---|
| 32 | | - * ALL: Means that the host adapter module can do scatter-gather, |
|---|
| 33 | | - * and that there is no limit to the size of the table to which |
|---|
| 34 | | - * we scatter/gather data. The value we set here is the maximum |
|---|
| 35 | | - * single element sglist. To use chained sglists, the adapter |
|---|
| 36 | | - * has to set a value beyond ALL (and correctly use the chain |
|---|
| 37 | | - * handling API. |
|---|
| 38 | | - * Anything else: Indicates the maximum number of chains that can be |
|---|
| 39 | | - * used in one scatter-gather request. |
|---|
| 40 | | - */ |
|---|
| 41 | | -#define SG_NONE 0 |
|---|
| 42 | 27 | #define SG_ALL SG_CHUNK_SIZE |
|---|
| 43 | 28 | |
|---|
| 44 | 29 | #define MODE_UNKNOWN 0x00 |
|---|
| 45 | 30 | #define MODE_INITIATOR 0x01 |
|---|
| 46 | 31 | #define MODE_TARGET 0x02 |
|---|
| 47 | | - |
|---|
| 48 | | -#define DISABLE_CLUSTERING 0 |
|---|
| 49 | | -#define ENABLE_CLUSTERING 1 |
|---|
| 50 | 32 | |
|---|
| 51 | 33 | struct scsi_host_template { |
|---|
| 52 | 34 | struct module *module; |
|---|
| .. | .. |
|---|
| 66 | 48 | * |
|---|
| 67 | 49 | * Status: OPTIONAL |
|---|
| 68 | 50 | */ |
|---|
| 69 | | - int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg); |
|---|
| 51 | + int (*ioctl)(struct scsi_device *dev, unsigned int cmd, |
|---|
| 52 | + void __user *arg); |
|---|
| 70 | 53 | |
|---|
| 71 | 54 | |
|---|
| 72 | 55 | #ifdef CONFIG_COMPAT |
|---|
| .. | .. |
|---|
| 76 | 59 | * |
|---|
| 77 | 60 | * Status: OPTIONAL |
|---|
| 78 | 61 | */ |
|---|
| 79 | | - int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg); |
|---|
| 62 | + int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd, |
|---|
| 63 | + void __user *arg); |
|---|
| 80 | 64 | #endif |
|---|
| 65 | + |
|---|
| 66 | + int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); |
|---|
| 67 | + int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); |
|---|
| 81 | 68 | |
|---|
| 82 | 69 | /* |
|---|
| 83 | 70 | * The queuecommand function is used to queue up a scsi |
|---|
| 84 | 71 | * command block to the LLDD. When the driver finished |
|---|
| 85 | 72 | * processing the command the done callback is invoked. |
|---|
| 86 | 73 | * |
|---|
| 87 | | - * If queuecommand returns 0, then the HBA has accepted the |
|---|
| 88 | | - * command. The done() function must be called on the command |
|---|
| 74 | + * If queuecommand returns 0, then the driver has accepted the |
|---|
| 75 | + * command. It must also push it to the HBA if the scsi_cmnd |
|---|
| 76 | + * flag SCMD_LAST is set, or if the driver does not implement |
|---|
| 77 | + * commit_rqs. The done() function must be called on the command |
|---|
| 89 | 78 | * when the driver has finished with it. (you may call done on the |
|---|
| 90 | 79 | * command before queuecommand returns, but in this case you |
|---|
| 91 | 80 | * *must* return 0 from queuecommand). |
|---|
| .. | .. |
|---|
| 112 | 101 | * STATUS: REQUIRED |
|---|
| 113 | 102 | */ |
|---|
| 114 | 103 | int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); |
|---|
| 104 | + |
|---|
| 105 | + /* |
|---|
| 106 | + * The commit_rqs function is used to trigger a hardware |
|---|
| 107 | + * doorbell after some requests have been queued with |
|---|
| 108 | + * queuecommand, when an error is encountered before sending |
|---|
| 109 | + * the request with SCMD_LAST set. |
|---|
| 110 | + * |
|---|
| 111 | + * STATUS: OPTIONAL |
|---|
| 112 | + */ |
|---|
| 113 | + void (*commit_rqs)(struct Scsi_Host *, u16); |
|---|
| 115 | 114 | |
|---|
| 116 | 115 | /* |
|---|
| 117 | 116 | * This is an error handling strategy routine. You don't need to |
|---|
| .. | .. |
|---|
| 273 | 272 | int (* map_queues)(struct Scsi_Host *shost); |
|---|
| 274 | 273 | |
|---|
| 275 | 274 | /* |
|---|
| 275 | + * Check if scatterlists need to be padded for DMA draining. |
|---|
| 276 | + * |
|---|
| 277 | + * Status: OPTIONAL |
|---|
| 278 | + */ |
|---|
| 279 | + bool (* dma_need_drain)(struct request *rq); |
|---|
| 280 | + |
|---|
| 281 | + /* |
|---|
| 276 | 282 | * This function determines the BIOS parameters for a given |
|---|
| 277 | 283 | * harddisk. These tend to be numbers that are made up by |
|---|
| 278 | 284 | * the host adapter. Parameters: |
|---|
| .. | .. |
|---|
| 304 | 310 | /* |
|---|
| 305 | 311 | * This is an optional routine that allows the transport to become |
|---|
| 306 | 312 | * involved when a scsi io timer fires. The return value tells the |
|---|
| 307 | | - * timer routine how to finish the io timeout handling: |
|---|
| 308 | | - * EH_HANDLED: I fixed the error, please complete the command |
|---|
| 309 | | - * EH_RESET_TIMER: I need more time, reset the timer and |
|---|
| 310 | | - * begin counting again |
|---|
| 311 | | - * EH_DONE: Begin normal error recovery |
|---|
| 313 | + * timer routine how to finish the io timeout handling. |
|---|
| 312 | 314 | * |
|---|
| 313 | 315 | * Status: OPTIONAL |
|---|
| 314 | 316 | */ |
|---|
| .. | .. |
|---|
| 341 | 343 | /* |
|---|
| 342 | 344 | * This determines if we will use a non-interrupt driven |
|---|
| 343 | 345 | * or an interrupt driven scheme. It is set to the maximum number |
|---|
| 344 | | - * of simultaneous commands a given host adapter will accept. |
|---|
| 346 | + * of simultaneous commands a single hw queue in the HBA will accept. |
|---|
| 345 | 347 | */ |
|---|
| 346 | 348 | int can_queue; |
|---|
| 347 | 349 | |
|---|
| .. | .. |
|---|
| 367 | 369 | unsigned int max_sectors; |
|---|
| 368 | 370 | |
|---|
| 369 | 371 | /* |
|---|
| 372 | + * Maximum size in bytes of a single segment. |
|---|
| 373 | + */ |
|---|
| 374 | + unsigned int max_segment_size; |
|---|
| 375 | + |
|---|
| 376 | + /* |
|---|
| 370 | 377 | * DMA scatter gather segment boundary limit. A segment crossing this |
|---|
| 371 | 378 | * boundary will be split in two. |
|---|
| 372 | 379 | */ |
|---|
| 373 | 380 | unsigned long dma_boundary; |
|---|
| 381 | + |
|---|
| 382 | + unsigned long virt_boundary_mask; |
|---|
| 374 | 383 | |
|---|
| 375 | 384 | /* |
|---|
| 376 | 385 | * This specifies "machine infinity" for host templates which don't |
|---|
| .. | .. |
|---|
| 416 | 425 | unsigned unchecked_isa_dma:1; |
|---|
| 417 | 426 | |
|---|
| 418 | 427 | /* |
|---|
| 419 | | - * True if this host adapter can make good use of clustering. |
|---|
| 420 | | - * I originally thought that if the tablesize was large that it |
|---|
| 421 | | - * was a waste of CPU cycles to prepare a cluster list, but |
|---|
| 422 | | - * it works out that the Buslogic is faster if you use a smaller |
|---|
| 423 | | - * number of segments (i.e. use clustering). I guess it is |
|---|
| 424 | | - * inefficient. |
|---|
| 425 | | - */ |
|---|
| 426 | | - unsigned use_clustering:1; |
|---|
| 427 | | - |
|---|
| 428 | | - /* |
|---|
| 429 | 428 | * True for emulated SCSI host adapters (e.g. ATAPI). |
|---|
| 430 | 429 | */ |
|---|
| 431 | 430 | unsigned emulated:1; |
|---|
| .. | .. |
|---|
| 438 | 437 | /* True if the controller does not support WRITE SAME */ |
|---|
| 439 | 438 | unsigned no_write_same:1; |
|---|
| 440 | 439 | |
|---|
| 441 | | - /* True if the low-level driver supports blk-mq only */ |
|---|
| 442 | | - unsigned force_blk_mq:1; |
|---|
| 440 | + /* True if the host uses host-wide tagspace */ |
|---|
| 441 | + unsigned host_tagset:1; |
|---|
| 443 | 442 | |
|---|
| 444 | 443 | /* |
|---|
| 445 | 444 | * Countdown for host blocking with no commands outstanding. |
|---|
| .. | .. |
|---|
| 507 | 506 | unsigned long irq_flags; \ |
|---|
| 508 | 507 | int rc; \ |
|---|
| 509 | 508 | spin_lock_irqsave(shost->host_lock, irq_flags); \ |
|---|
| 510 | | - scsi_cmd_get_serial(shost, cmd); \ |
|---|
| 511 | 509 | rc = func_name##_lck (cmd, cmd->scsi_done); \ |
|---|
| 512 | 510 | spin_unlock_irqrestore(shost->host_lock, irq_flags); \ |
|---|
| 513 | 511 | return rc; \ |
|---|
| .. | .. |
|---|
| 556 | 554 | struct scsi_host_template *hostt; |
|---|
| 557 | 555 | struct scsi_transport_template *transportt; |
|---|
| 558 | 556 | |
|---|
| 559 | | - /* |
|---|
| 560 | | - * Area to keep a shared tag map (if needed, will be |
|---|
| 561 | | - * NULL if not). |
|---|
| 562 | | - */ |
|---|
| 563 | | - union { |
|---|
| 564 | | - struct blk_queue_tag *bqt; |
|---|
| 565 | | - struct blk_mq_tag_set tag_set; |
|---|
| 566 | | - }; |
|---|
| 557 | + /* Area to keep a shared tag map */ |
|---|
| 558 | + struct blk_mq_tag_set tag_set; |
|---|
| 567 | 559 | |
|---|
| 568 | | - atomic_t host_busy; /* commands actually active on low-level */ |
|---|
| 569 | 560 | atomic_t host_blocked; |
|---|
| 570 | 561 | |
|---|
| 571 | 562 | unsigned int host_failed; /* commands that failed. |
|---|
| .. | .. |
|---|
| 613 | 604 | short unsigned int sg_tablesize; |
|---|
| 614 | 605 | short unsigned int sg_prot_tablesize; |
|---|
| 615 | 606 | unsigned int max_sectors; |
|---|
| 607 | + unsigned int max_segment_size; |
|---|
| 616 | 608 | unsigned long dma_boundary; |
|---|
| 609 | + unsigned long virt_boundary_mask; |
|---|
| 617 | 610 | /* |
|---|
| 618 | 611 | * In scsi-mq mode, the number of hardware queues supported by the LLD. |
|---|
| 619 | 612 | * |
|---|
| 620 | 613 | * Note: it is assumed that each hardware queue has a queue depth of |
|---|
| 621 | 614 | * can_queue. In other words, the total queue depth per host |
|---|
| 622 | | - * is nr_hw_queues * can_queue. |
|---|
| 615 | + * is nr_hw_queues * can_queue. However, when host_tagset is set, |
|---|
| 616 | + * the total queue depth is can_queue. |
|---|
| 623 | 617 | */ |
|---|
| 624 | 618 | unsigned nr_hw_queues; |
|---|
| 625 | | - /* |
|---|
| 626 | | - * Used to assign serial numbers to the cmds. |
|---|
| 627 | | - * Protected by the host lock. |
|---|
| 628 | | - */ |
|---|
| 629 | | - unsigned long cmd_serial_number; |
|---|
| 630 | | - |
|---|
| 631 | 619 | unsigned active_mode:2; |
|---|
| 632 | 620 | unsigned unchecked_isa_dma:1; |
|---|
| 633 | | - unsigned use_clustering:1; |
|---|
| 634 | 621 | |
|---|
| 635 | 622 | /* |
|---|
| 636 | 623 | * Host has requested that no further requests come through for the |
|---|
| .. | .. |
|---|
| 657 | 644 | /* The controller does not support WRITE SAME */ |
|---|
| 658 | 645 | unsigned no_write_same:1; |
|---|
| 659 | 646 | |
|---|
| 660 | | - unsigned use_blk_mq:1; |
|---|
| 661 | | - unsigned use_cmd_list:1; |
|---|
| 647 | + /* True if the host uses host-wide tagspace */ |
|---|
| 648 | + unsigned host_tagset:1; |
|---|
| 662 | 649 | |
|---|
| 663 | 650 | /* Host responded with short (<36 bytes) INQUIRY result */ |
|---|
| 664 | 651 | unsigned short_inquiry:1; |
|---|
| 665 | 652 | |
|---|
| 666 | | - /* |
|---|
| 667 | | - * Set "DBD" field in mode_sense caching mode page in case it is |
|---|
| 668 | | - * mandatory by LLD standard. |
|---|
| 669 | | - */ |
|---|
| 670 | | - unsigned set_dbd_for_caching:1; |
|---|
| 653 | + /* The transport requires the LUN bits NOT to be stored in CDB[1] */ |
|---|
| 654 | + unsigned no_scsi2_lun_in_cdb:1; |
|---|
| 671 | 655 | |
|---|
| 672 | 656 | /* |
|---|
| 673 | 657 | * Optional work queue to be utilized by the transport |
|---|
| .. | .. |
|---|
| 679 | 663 | * Task management function work queue |
|---|
| 680 | 664 | */ |
|---|
| 681 | 665 | struct workqueue_struct *tmf_work_q; |
|---|
| 682 | | - |
|---|
| 683 | | - /* The transport requires the LUN bits NOT to be stored in CDB[1] */ |
|---|
| 684 | | - unsigned no_scsi2_lun_in_cdb:1; |
|---|
| 685 | 666 | |
|---|
| 686 | 667 | /* |
|---|
| 687 | 668 | * Value host_blocked counts down from |
|---|
| .. | .. |
|---|
| 717 | 698 | */ |
|---|
| 718 | 699 | struct device *dma_dev; |
|---|
| 719 | 700 | |
|---|
| 701 | + ANDROID_KABI_RESERVE(1); |
|---|
| 702 | + |
|---|
| 720 | 703 | /* |
|---|
| 721 | 704 | * We should ensure that this is aligned, both for better performance |
|---|
| 722 | 705 | * and also because some compilers (m68k) don't automatically force |
|---|
| 723 | 706 | * alignment to a long boundary. |
|---|
| 724 | 707 | */ |
|---|
| 725 | | - unsigned long hostdata[0] /* Used for storage of host specific stuff */ |
|---|
| 708 | + unsigned long hostdata[] /* Used for storage of host specific stuff */ |
|---|
| 726 | 709 | __attribute__ ((aligned (sizeof(unsigned long)))); |
|---|
| 727 | 710 | }; |
|---|
| 728 | 711 | |
|---|
| .. | .. |
|---|
| 757 | 740 | shost->tmf_in_progress; |
|---|
| 758 | 741 | } |
|---|
| 759 | 742 | |
|---|
| 760 | | -static inline bool shost_use_blk_mq(struct Scsi_Host *shost) |
|---|
| 761 | | -{ |
|---|
| 762 | | - return shost->use_blk_mq; |
|---|
| 763 | | -} |
|---|
| 764 | | - |
|---|
| 765 | 743 | extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); |
|---|
| 766 | 744 | extern void scsi_flush_work(struct Scsi_Host *); |
|---|
| 767 | 745 | |
|---|
| .. | .. |
|---|
| 777 | 755 | extern void scsi_host_put(struct Scsi_Host *t); |
|---|
| 778 | 756 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); |
|---|
| 779 | 757 | extern const char *scsi_host_state_name(enum scsi_host_state); |
|---|
| 780 | | -extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *); |
|---|
| 758 | +extern void scsi_host_complete_all_commands(struct Scsi_Host *shost, |
|---|
| 759 | + int status); |
|---|
| 781 | 760 | |
|---|
| 782 | 761 | static inline int __must_check scsi_add_host(struct Scsi_Host *host, |
|---|
| 783 | 762 | struct device *dev) |
|---|
| .. | .. |
|---|
| 802 | 781 | |
|---|
| 803 | 782 | extern void scsi_unblock_requests(struct Scsi_Host *); |
|---|
| 804 | 783 | extern void scsi_block_requests(struct Scsi_Host *); |
|---|
| 784 | +extern int scsi_host_block(struct Scsi_Host *shost); |
|---|
| 785 | +extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state); |
|---|
| 786 | + |
|---|
| 787 | +void scsi_host_busy_iter(struct Scsi_Host *, |
|---|
| 788 | + bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv); |
|---|
| 805 | 789 | |
|---|
| 806 | 790 | struct class_container; |
|---|
| 807 | 791 | |
|---|