hc
2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/include/scsi/scsi_host.h
@@ -12,7 +12,6 @@
 #include <scsi/scsi.h>
 #include <linux/android_kabi.h>
 
-struct request_queue;
 struct block_device;
 struct completion;
 struct module;
@@ -23,30 +22,13 @@
 struct Scsi_Host;
 struct scsi_host_cmd_pool;
 struct scsi_transport_template;
-struct blk_queue_tags;
 
 
-/*
- * The various choices mean:
- * NONE: Self evident. Host adapter is not capable of scatter-gather.
- * ALL: Means that the host adapter module can do scatter-gather,
- *      and that there is no limit to the size of the table to which
- *      we scatter/gather data. The value we set here is the maximum
- *      single element sglist. To use chained sglists, the adapter
- *      has to set a value beyond ALL (and correctly use the chain
- *      handling API.
- * Anything else: Indicates the maximum number of chains that can be
- *      used in one scatter-gather request.
- */
-#define SG_NONE 0
 #define SG_ALL SG_CHUNK_SIZE
 
 #define MODE_UNKNOWN 0x00
 #define MODE_INITIATOR 0x01
 #define MODE_TARGET 0x02
-
-#define DISABLE_CLUSTERING 0
-#define ENABLE_CLUSTERING 1
 
 struct scsi_host_template {
 	struct module *module;
@@ -66,7 +48,8 @@
 	 *
 	 * Status: OPTIONAL
 	 */
-	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
+	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
+		     void __user *arg);
 
 
 #ifdef CONFIG_COMPAT
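
With this change a low-level driver's ->ioctl handler takes the command code as an unsigned int. A minimal sketch of a matching handler (the foo_* names and FOO_GET_FW_REV are purely illustrative, not part of this patch):

static int foo_ioctl(struct scsi_device *sdev, unsigned int cmd,
		     void __user *arg)
{
	/* FOO_GET_FW_REV is a hypothetical driver-private ioctl */
	if (cmd == FOO_GET_FW_REV)
		return put_user(0x0102, (u32 __user *)arg);

	return -ENOIOCTLCMD;	/* not handled by this driver */
}
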
@@ -76,16 +59,22 @@
 	 *
 	 * Status: OPTIONAL
 	 */
-	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
+	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
+			    void __user *arg);
 #endif
+
+	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
 
 	/*
 	 * The queuecommand function is used to queue up a scsi
 	 * command block to the LLDD. When the driver finished
 	 * processing the command the done callback is invoked.
 	 *
-	 * If queuecommand returns 0, then the HBA has accepted the
-	 * command. The done() function must be called on the command
+	 * If queuecommand returns 0, then the driver has accepted the
+	 * command. It must also push it to the HBA if the scsi_cmnd
+	 * flag SCMD_LAST is set, or if the driver does not implement
+	 * commit_rqs. The done() function must be called on the command
 	 * when the driver has finished with it. (you may call done on the
 	 * command before queuecommand returns, but in this case you
 	 * *must* return 0 from queuecommand).
@@ -112,6 +101,16 @@
 	 * STATUS: REQUIRED
 	 */
 	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
+
+	/*
+	 * The commit_rqs function is used to trigger a hardware
+	 * doorbell after some requests have been queued with
+	 * queuecommand, when an error is encountered before sending
+	 * the request with SCMD_LAST set.
+	 *
+	 * STATUS: OPTIONAL
+	 */
+	void (*commit_rqs)(struct Scsi_Host *, u16);
 
 	/*
 	 * This is an error handling strategy routine. You don't need to
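
Per the queuecommand/commit_rqs contract above, a driver may batch submissions and only ring its doorbell when it sees SCMD_LAST; commit_rqs() covers the case where the block layer abandons a batch before a command with SCMD_LAST was sent. A rough sketch under those assumptions (struct foo_hba, foo_post_to_ring() and foo_ring_doorbell() are hypothetical helpers):

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct foo_hba *hba = shost_priv(shost);

	foo_post_to_ring(hba, cmd);		/* queue the command, no doorbell yet */
	if (cmd->flags & SCMD_LAST)
		foo_ring_doorbell(hba);		/* end of the batch, notify the HBA */
	return 0;
}

static void foo_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	/* the batch was cut short before an SCMD_LAST command was queued */
	foo_ring_doorbell(shost_priv(shost));
}
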
@@ -273,6 +272,13 @@
 	int (* map_queues)(struct Scsi_Host *shost);
 
 	/*
+	 * Check if scatterlists need to be padded for DMA draining.
+	 *
+	 * Status: OPTIONAL
+	 */
+	bool (* dma_need_drain)(struct request *rq);
+
+	/*
 	 * This function determines the BIOS parameters for a given
 	 * harddisk. These tend to be numbers that are made up by
 	 * the host adapter. Parameters:
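
dma_need_drain() lets the midlayer ask, per request, whether the scatterlist should be padded with the host's drain buffer. A hedged sketch (foo_cmd_needs_drain() stands in for whatever per-command check a real driver would make):

static bool foo_dma_need_drain(struct request *rq)
{
	/* in scsi-mq the scsi_cmnd lives in the request's pdu */
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	return foo_cmd_needs_drain(scmd->cmnd[0]);
}
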
@@ -304,11 +310,7 @@
 	/*
 	 * This is an optional routine that allows the transport to become
 	 * involved when a scsi io timer fires. The return value tells the
-	 * timer routine how to finish the io timeout handling:
-	 * EH_HANDLED:     I fixed the error, please complete the command
-	 * EH_RESET_TIMER: I need more time, reset the timer and
-	 *                 begin counting again
-	 * EH_DONE:        Begin normal error recovery
+	 * timer routine how to finish the io timeout handling.
 	 *
 	 * Status: OPTIONAL
 	 */
@@ -341,7 +343,7 @@
 	/*
 	 * This determines if we will use a non-interrupt driven
 	 * or an interrupt driven scheme. It is set to the maximum number
-	 * of simultaneous commands a given host adapter will accept.
+	 * of simultaneous commands a single hw queue in HBA will accept.
 	 */
 	int can_queue;
 
@@ -367,10 +369,17 @@
 	unsigned int max_sectors;
 
 	/*
+	 * Maximum size in bytes of a single segment.
+	 */
+	unsigned int max_segment_size;
+
+	/*
 	 * DMA scatter gather segment boundary limit. A segment crossing this
 	 * boundary will be split in two.
 	 */
 	unsigned long dma_boundary;
+
+	unsigned long virt_boundary_mask;
 
 	/*
 	 * This specifies "machine infinity" for host templates which don't
@@ -416,16 +425,6 @@
 	unsigned unchecked_isa_dma:1;
 
 	/*
-	 * True if this host adapter can make good use of clustering.
-	 * I originally thought that if the tablesize was large that it
-	 * was a waste of CPU cycles to prepare a cluster list, but
-	 * it works out that the Buslogic is faster if you use a smaller
-	 * number of segments (i.e. use clustering). I guess it is
-	 * inefficient.
-	 */
-	unsigned use_clustering:1;
-
-	/*
 	 * True for emulated SCSI host adapters (e.g. ATAPI).
 	 */
 	unsigned emulated:1;
@@ -438,8 +437,8 @@
 	/* True if the controller does not support WRITE SAME */
 	unsigned no_write_same:1;
 
-	/* True if the low-level driver supports blk-mq only */
-	unsigned force_blk_mq:1;
+	/* True if the host uses host-wide tagspace */
+	unsigned host_tagset:1;
 
 	/*
 	 * Countdown for host blocking with no commands outstanding.
@@ -507,7 +506,6 @@
 	unsigned long irq_flags; \
 	int rc; \
 	spin_lock_irqsave(shost->host_lock, irq_flags); \
-	scsi_cmd_get_serial(shost, cmd); \
 	rc = func_name##_lck (cmd, cmd->scsi_done); \
 	spin_unlock_irqrestore(shost->host_lock, irq_flags); \
 	return rc; \
@@ -556,16 +554,9 @@
 	struct scsi_host_template *hostt;
 	struct scsi_transport_template *transportt;
 
-	/*
-	 * Area to keep a shared tag map (if needed, will be
-	 * NULL if not).
-	 */
-	union {
-		struct blk_queue_tag *bqt;
-		struct blk_mq_tag_set tag_set;
-	};
+	/* Area to keep a shared tag map */
+	struct blk_mq_tag_set tag_set;
 
-	atomic_t host_busy; /* commands actually active on low-level */
 	atomic_t host_blocked;
 
 	unsigned int host_failed; /* commands that failed.
@@ -613,24 +604,20 @@
 	short unsigned int sg_tablesize;
 	short unsigned int sg_prot_tablesize;
 	unsigned int max_sectors;
+	unsigned int max_segment_size;
 	unsigned long dma_boundary;
+	unsigned long virt_boundary_mask;
 	/*
 	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
 	 *
 	 * Note: it is assumed that each hardware queue has a queue depth of
 	 * can_queue. In other words, the total queue depth per host
-	 * is nr_hw_queues * can_queue.
+	 * is nr_hw_queues * can_queue. However, for when host_tagset is set,
+	 * the total queue depth is can_queue.
 	 */
 	unsigned nr_hw_queues;
-	/*
-	 * Used to assign serial numbers to the cmds.
-	 * Protected by the host lock.
-	 */
-	unsigned long cmd_serial_number;
-
 	unsigned active_mode:2;
 	unsigned unchecked_isa_dma:1;
-	unsigned use_clustering:1;
 
 	/*
 	 * Host has requested that no further requests come through for the
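
To make the depth accounting above concrete (numbers are illustrative only): with can_queue = 32 and nr_hw_queues = 4, the midlayer assumes 4 * 32 = 128 tags in total; if host_tagset is set, the same 32-tag space is shared by all four hardware queues, so the total depth stays at 32.
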
@@ -657,17 +644,14 @@
 	/* The controller does not support WRITE SAME */
 	unsigned no_write_same:1;
 
-	unsigned use_blk_mq:1;
-	unsigned use_cmd_list:1;
+	/* True if the host uses host-wide tagspace */
+	unsigned host_tagset:1;
 
 	/* Host responded with short (<36 bytes) INQUIRY result */
 	unsigned short_inquiry:1;
 
-	/*
-	 * Set "DBD" field in mode_sense caching mode page in case it is
-	 * mandatory by LLD standard.
-	 */
-	unsigned set_dbd_for_caching:1;
+	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
+	unsigned no_scsi2_lun_in_cdb:1;
 
 	/*
 	 * Optional work queue to be utilized by the transport
@@ -679,9 +663,6 @@
 	 * Task management function work queue
 	 */
 	struct workqueue_struct *tmf_work_q;
-
-	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
-	unsigned no_scsi2_lun_in_cdb:1;
 
 	/*
 	 * Value host_blocked counts down from
@@ -717,12 +698,14 @@
 	 */
 	struct device *dma_dev;
 
+	ANDROID_KABI_RESERVE(1);
+
 	/*
 	 * We should ensure that this is aligned, both for better performance
 	 * and also because some compilers (m68k) don't automatically force
 	 * alignment to a long boundary.
	 */
-	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
+	unsigned long hostdata[]  /* Used for storage of host specific stuff */
 		__attribute__ ((aligned (sizeof(unsigned long))));
 };
 
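
hostdata[] (now a flexible array member rather than a [0]-length array) is the per-host private area: drivers size it when calling scsi_host_alloc() and reach it via shost_priv(). A minimal sketch, assuming a hypothetical PCI driver with a struct foo_hba and a foo_template host template (error handling elided):

struct foo_hba {
	void __iomem *regs;		/* hypothetical per-host driver state */
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct foo_hba *hba;

	shost = scsi_host_alloc(&foo_template, sizeof(*hba));
	if (!shost)
		return -ENOMEM;

	hba = shost_priv(shost);	/* points into shost->hostdata[] */
	hba->regs = pci_iomap(pdev, 0, 0);

	return scsi_add_host(shost, &pdev->dev);
}
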
@@ -757,11 +740,6 @@
 		shost->tmf_in_progress;
 }
 
-static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
-{
-	return shost->use_blk_mq;
-}
-
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
 extern void scsi_flush_work(struct Scsi_Host *);
 
@@ -777,7 +755,8 @@
 extern void scsi_host_put(struct Scsi_Host *t);
 extern struct Scsi_Host *scsi_host_lookup(unsigned short);
 extern const char *scsi_host_state_name(enum scsi_host_state);
-extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
+extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+					    int status);
 
 static inline int __must_check scsi_add_host(struct Scsi_Host *host,
 					     struct device *dev)
@@ -802,6 +781,11 @@
 
 extern void scsi_unblock_requests(struct Scsi_Host *);
 extern void scsi_block_requests(struct Scsi_Host *);
+extern int scsi_host_block(struct Scsi_Host *shost);
+extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
+
+void scsi_host_busy_iter(struct Scsi_Host *,
+			 bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
 
 struct class_container;
 
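
scsi_host_busy_iter() walks the commands currently dispatched to the host and calls fn for each one (returning false from fn should stop the walk). A small sketch that just counts them (foo_count_busy and the surrounding usage are illustrative):

static bool foo_count_busy(struct scsi_cmnd *cmd, void *priv, bool reserved)
{
	unsigned int *busy = priv;

	(*busy)++;
	return true;			/* keep iterating */
}

/* e.g. from an error handler, with shost in scope: */
unsigned int busy = 0;
scsi_host_busy_iter(shost, foo_count_busy, &busy);
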