2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/s390/cio/vfio_ccw_fsm.c
@@ -3,8 +3,10 @@
  * Finite state machine for vfio-ccw device handling
  *
  * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
  *
  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Cornelia Huck <cohuck@redhat.com>
  */
 
 #include <linux/vfio.h>
@@ -12,9 +14,6 @@
 
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
-
-#define CREATE_TRACE_POINTS
-#include "vfio_ccw_trace.h"
 
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
@@ -28,12 +27,20 @@
 	sch = private->sch;
 
 	spin_lock_irqsave(sch->lock, flags);
-	private->state = VFIO_CCW_STATE_BUSY;
 
 	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+	if (!orb) {
+		ret = -EIO;
+		goto out;
+	}
+
+	VFIO_CCW_TRACE_EVENT(5, "stIO");
+	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
 
 	/* Issue "Start Subchannel" */
 	ccode = ssch(sch->schid, orb);
+
+	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));
 
 	switch (ccode) {
 	case 0:
@@ -42,6 +49,7 @@
 		 */
 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
 		ret = 0;
+		private->state = VFIO_CCW_STATE_CP_PENDING;
 		break;
 	case 1: /* Status pending */
 	case 2: /* Busy */
@@ -64,6 +72,86 @@
 	default:
 		ret = ccode;
 	}
+out:
+	spin_unlock_irqrestore(sch->lock, flags);
+	return ret;
+}
+
+static int fsm_do_halt(struct vfio_ccw_private *private)
+{
+	struct subchannel *sch;
+	unsigned long flags;
+	int ccode;
+	int ret;
+
+	sch = private->sch;
+
+	spin_lock_irqsave(sch->lock, flags);
+
+	VFIO_CCW_TRACE_EVENT(2, "haltIO");
+	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	/* Issue "Halt Subchannel" */
+	ccode = hsch(sch->schid);
+
+	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * Initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+		ret = 0;
+		break;
+	case 1: /* Status pending */
+	case 2: /* Busy */
+		ret = -EBUSY;
+		break;
+	case 3: /* Device not operational */
+		ret = -ENODEV;
+		break;
+	default:
+		ret = ccode;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	return ret;
+}
+
+static int fsm_do_clear(struct vfio_ccw_private *private)
+{
+	struct subchannel *sch;
+	unsigned long flags;
+	int ccode;
+	int ret;
+
+	sch = private->sch;
+
+	spin_lock_irqsave(sch->lock, flags);
+
+	VFIO_CCW_TRACE_EVENT(2, "clearIO");
+	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	/* Issue "Clear Subchannel" */
+	ccode = csch(sch->schid);
+
+	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * Initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
+		/* TODO: check what else we might need to clear */
+		ret = 0;
+		break;
+	case 3: /* Device not operational */
+		ret = -ENODEV;
+		break;
+	default:
+		ret = ccode;
+	}
 	spin_unlock_irqrestore(sch->lock, flags);
 	return ret;
 }
@@ -72,6 +160,9 @@
 			enum vfio_ccw_event event)
 {
 	struct subchannel *sch = private->sch;
+
+	VFIO_CCW_TRACE_EVENT(2, "notoper");
+	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
 
 	/*
 	 * TODO:
@@ -102,6 +193,30 @@
 	private->io_region->ret_code = -EBUSY;
 }
 
+static void fsm_io_retry(struct vfio_ccw_private *private,
+			 enum vfio_ccw_event event)
+{
+	private->io_region->ret_code = -EAGAIN;
+}
+
+static void fsm_async_error(struct vfio_ccw_private *private,
+			    enum vfio_ccw_event event)
+{
+	struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+	pr_err("vfio-ccw: FSM: %s request from state:%d\n",
+	       cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
+	       cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
+	       "<unknown>", private->state);
+	cmd_region->ret_code = -EIO;
+}
+
+static void fsm_async_retry(struct vfio_ccw_private *private,
+			    enum vfio_ccw_event event)
+{
+	private->cmd_region->ret_code = -EAGAIN;
+}
+
 static void fsm_disabled_irq(struct vfio_ccw_private *private,
 			     enum vfio_ccw_event event)
 {
@@ -129,9 +244,9 @@
 	struct ccw_io_region *io_region = private->io_region;
 	struct mdev_device *mdev = private->mdev;
 	char *errstr = "request";
+	struct subchannel_id schid = get_schid(private);
 
-	private->state = VFIO_CCW_STATE_BOXED;
-
+	private->state = VFIO_CCW_STATE_CP_PROCESSING;
 	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
 
 	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -140,18 +255,32 @@
 		/* Don't try to build a cp if transport mode is specified. */
 		if (orb->tm.b) {
 			io_region->ret_code = -EOPNOTSUPP;
+			VFIO_CCW_MSG_EVENT(2,
+					   "%pUl (%x.%x.%04x): transport mode\n",
+					   mdev_uuid(mdev), schid.cssid,
+					   schid.ssid, schid.sch_no);
 			errstr = "transport mode";
 			goto err_out;
 		}
 		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
 					      orb);
 		if (io_region->ret_code) {
+			VFIO_CCW_MSG_EVENT(2,
+					   "%pUl (%x.%x.%04x): cp_init=%d\n",
+					   mdev_uuid(mdev), schid.cssid,
+					   schid.ssid, schid.sch_no,
+					   io_region->ret_code);
 			errstr = "cp init";
 			goto err_out;
 		}
 
 		io_region->ret_code = cp_prefetch(&private->cp);
 		if (io_region->ret_code) {
+			VFIO_CCW_MSG_EVENT(2,
+					   "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
+					   mdev_uuid(mdev), schid.cssid,
+					   schid.ssid, schid.sch_no,
+					   io_region->ret_code);
 			errstr = "cp prefetch";
 			cp_free(&private->cp);
 			goto err_out;
@@ -160,25 +289,63 @@
 		/* Start channel program and wait for I/O interrupt. */
 		io_region->ret_code = fsm_io_helper(private);
 		if (io_region->ret_code) {
+			VFIO_CCW_MSG_EVENT(2,
+					   "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
+					   mdev_uuid(mdev), schid.cssid,
+					   schid.ssid, schid.sch_no,
+					   io_region->ret_code);
 			errstr = "cp fsm_io_helper";
 			cp_free(&private->cp);
 			goto err_out;
 		}
 		return;
 	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
-		/* XXX: Handle halt. */
+		VFIO_CCW_MSG_EVENT(2,
+				   "%pUl (%x.%x.%04x): halt on io_region\n",
+				   mdev_uuid(mdev), schid.cssid,
+				   schid.ssid, schid.sch_no);
+		/* halt is handled via the async cmd region */
 		io_region->ret_code = -EOPNOTSUPP;
 		goto err_out;
 	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
-		/* XXX: Handle clear. */
+		VFIO_CCW_MSG_EVENT(2,
+				   "%pUl (%x.%x.%04x): clear on io_region\n",
+				   mdev_uuid(mdev), schid.cssid,
+				   schid.ssid, schid.sch_no);
+		/* clear is handled via the async cmd region */
 		io_region->ret_code = -EOPNOTSUPP;
 		goto err_out;
 	}
 
 err_out:
 	private->state = VFIO_CCW_STATE_IDLE;
-	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
-			       io_region->ret_code, errstr);
+	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+				      io_region->ret_code, errstr);
+}
+
+/*
+ * Deal with an async request from userspace.
+ */
+static void fsm_async_request(struct vfio_ccw_private *private,
+			      enum vfio_ccw_event event)
+{
+	struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+	switch (cmd_region->command) {
+	case VFIO_CCW_ASYNC_CMD_HSCH:
+		cmd_region->ret_code = fsm_do_halt(private);
+		break;
+	case VFIO_CCW_ASYNC_CMD_CSCH:
+		cmd_region->ret_code = fsm_do_clear(private);
+		break;
+	default:
+		/* should not happen? */
+		cmd_region->ret_code = -EINVAL;
+	}
+
+	trace_vfio_ccw_fsm_async_request(get_schid(private),
+					 cmd_region->command,
+					 cmd_region->ret_code);
 }
 
 /*
@@ -188,6 +355,9 @@
 		    enum vfio_ccw_event event)
 {
 	struct irb *irb = this_cpu_ptr(&cio_irb);
+
+	VFIO_CCW_TRACE_EVENT(6, "IRQ");
+	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));
 
 	memcpy(&private->irb, irb, sizeof(*irb));
 
@@ -204,26 +374,31 @@
 	[VFIO_CCW_STATE_NOT_OPER] = {
 		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
 		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
+		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
 		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
 	},
 	[VFIO_CCW_STATE_STANDBY] = {
 		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
 		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
+		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
 		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
 	},
 	[VFIO_CCW_STATE_IDLE] = {
 		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
 		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
+		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
 		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
 	},
-	[VFIO_CCW_STATE_BOXED] = {
+	[VFIO_CCW_STATE_CP_PROCESSING] = {
 		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
-		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
+		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
+		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
 		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
 	},
-	[VFIO_CCW_STATE_BUSY] = {
+	[VFIO_CCW_STATE_CP_PENDING] = {
 		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
 		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
+		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
 		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
 	},
 };
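
For context on how the state/event table above is consumed (not part of this patch): every device event is funneled through a small dispatcher that indexes the two-dimensional jump table by the current state and the incoming event. A minimal sketch of that dispatch follows, using the fsm_func_t and vfio_ccw_jumptable names from vfio_ccw_private.h; treat the exact declarations as an assumption rather than a quote of the header.

/* Sketch only: signatures assumed, not copied from the tree. */
typedef void (fsm_func_t)(struct vfio_ccw_private *private,
			  enum vfio_ccw_event event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];

static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
				      enum vfio_ccw_event event)
{
	/*
	 * Look up the handler for (state, event) and call it, e.g. state
	 * VFIO_CCW_STATE_CP_PENDING + event VFIO_CCW_EVENT_ASYNC_REQ now
	 * lands in fsm_async_request() per the table in this patch.
	 */
	vfio_ccw_jumptable[private->state][event](private, event);
}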