/* xref: /linux/drivers/s390/cio/vfio_ccw_fsm.c (revision 908fc4c2) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Finite state machine for vfio-ccw device handling
4  *
5  * Copyright IBM Corp. 2017
6  * Copyright Red Hat, Inc. 2019
7  *
8  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
9  *            Cornelia Huck <cohuck@redhat.com>
10  */
11 
12 #include <linux/vfio.h>
13 #include <linux/mdev.h>
14 
15 #include "ioasm.h"
16 #include "vfio_ccw_private.h"
17 
/*
 * fsm_io_helper - issue "Start Subchannel" for the translated channel program
 * @private: the vfio-ccw device whose channel program (private->cp) to start
 *
 * Takes the subchannel lock around ORB retrieval, ssch() and the
 * condition-code handling.
 *
 * Returns:
 *   0        - start accepted; state advances to VFIO_CCW_STATE_CP_PENDING
 *   -EIO     - no usable ORB could be obtained from the channel program
 *   -EBUSY   - subchannel status pending or busy (cc 1/2)
 *   -EACCES  - cc 3 but other paths remain usable
 *   -ENODEV  - cc 3 and no path/device available, or schib update failed
 *   ccode    - any other (unexpected) condition code, passed through as-is
 */
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	/* The subchannel address doubles as the interruption parameter. */
	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
	if (!orb) {
		ret = -EIO;
		goto out;
	}

	VFIO_CCW_TRACE_EVENT(5, "stIO");
	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		private->state = VFIO_CCW_STATE_CP_PENDING;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		/*
		 * Retire the path(s) the ORB requested; an lpm of 0 means
		 * "any path", so in that case no path is usable at all.
		 */
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		/* NOTE(review): ssch cc is 0-3, so this looks unreachable. */
		ret = ccode;
	}
out:
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
79 
80 static int fsm_do_halt(struct vfio_ccw_private *private)
81 {
82 	struct subchannel *sch;
83 	unsigned long flags;
84 	int ccode;
85 	int ret;
86 
87 	sch = private->sch;
88 
89 	spin_lock_irqsave(sch->lock, flags);
90 
91 	VFIO_CCW_TRACE_EVENT(2, "haltIO");
92 	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
93 
94 	/* Issue "Halt Subchannel" */
95 	ccode = hsch(sch->schid);
96 
97 	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
98 
99 	switch (ccode) {
100 	case 0:
101 		/*
102 		 * Initialize device status information
103 		 */
104 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
105 		ret = 0;
106 		break;
107 	case 1:		/* Status pending */
108 	case 2:		/* Busy */
109 		ret = -EBUSY;
110 		break;
111 	case 3:		/* Device not operational */
112 		ret = -ENODEV;
113 		break;
114 	default:
115 		ret = ccode;
116 	}
117 	spin_unlock_irqrestore(sch->lock, flags);
118 	return ret;
119 }
120 
121 static int fsm_do_clear(struct vfio_ccw_private *private)
122 {
123 	struct subchannel *sch;
124 	unsigned long flags;
125 	int ccode;
126 	int ret;
127 
128 	sch = private->sch;
129 
130 	spin_lock_irqsave(sch->lock, flags);
131 
132 	VFIO_CCW_TRACE_EVENT(2, "clearIO");
133 	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
134 
135 	/* Issue "Clear Subchannel" */
136 	ccode = csch(sch->schid);
137 
138 	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
139 
140 	switch (ccode) {
141 	case 0:
142 		/*
143 		 * Initialize device status information
144 		 */
145 		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
146 		/* TODO: check what else we might need to clear */
147 		ret = 0;
148 		break;
149 	case 3:		/* Device not operational */
150 		ret = -ENODEV;
151 		break;
152 	default:
153 		ret = ccode;
154 	}
155 	spin_unlock_irqrestore(sch->lock, flags);
156 	return ret;
157 }
158 
/*
 * fsm_notoper - handle a "not operational" event: schedule unregistration
 * of the subchannel and park the FSM in the NOT_OPER state.
 */
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	VFIO_CCW_TRACE_EVENT(2, "notoper");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;
}
174 
/*
 * No operation action: used for (state, event) pairs that are
 * deliberately ignored.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}
182 
183 static void fsm_io_error(struct vfio_ccw_private *private,
184 			 enum vfio_ccw_event event)
185 {
186 	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
187 	private->io_region->ret_code = -EIO;
188 }
189 
190 static void fsm_io_busy(struct vfio_ccw_private *private,
191 			enum vfio_ccw_event event)
192 {
193 	private->io_region->ret_code = -EBUSY;
194 }
195 
196 static void fsm_io_retry(struct vfio_ccw_private *private,
197 			 enum vfio_ccw_event event)
198 {
199 	private->io_region->ret_code = -EAGAIN;
200 }
201 
202 static void fsm_async_error(struct vfio_ccw_private *private,
203 			    enum vfio_ccw_event event)
204 {
205 	struct ccw_cmd_region *cmd_region = private->cmd_region;
206 
207 	pr_err("vfio-ccw: FSM: %s request from state:%d\n",
208 	       cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
209 	       cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
210 	       "<unknown>", private->state);
211 	cmd_region->ret_code = -EIO;
212 }
213 
214 static void fsm_async_retry(struct vfio_ccw_private *private,
215 			    enum vfio_ccw_event event)
216 {
217 	private->cmd_region->ret_code = -EAGAIN;
218 }
219 
220 static void fsm_disabled_irq(struct vfio_ccw_private *private,
221 			     enum vfio_ccw_event event)
222 {
223 	struct subchannel *sch = private->sch;
224 
225 	/*
226 	 * An interrupt in a disabled state means a previous disable was not
227 	 * successful - should not happen, but we try to disable again.
228 	 */
229 	cio_disable_subchannel(sch);
230 }
231 inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
232 {
233 	return p->sch->schid;
234 }
235 
/*
 * Deal with the ccw command request from the userspace.
 *
 * Interprets the SCSW function-control bits userspace wrote into the I/O
 * region: a start function triggers channel-program translation
 * (cp_init/cp_prefetch) followed by ssch() via fsm_io_helper(); halt and
 * clear are rejected here because they are served by the async command
 * region instead.  On the successful start path the state stays
 * CP_PENDING (set by fsm_io_helper); every other path resets it to IDLE.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	struct mdev_device *mdev = private->mdev;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

	private->state = VFIO_CCW_STATE_CP_PROCESSING;
	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): transport mode\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
		}
		/* Translate the guest channel program into a host one. */
		io_region->ret_code = cp_init(&private->cp, orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_init=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		/* Started successfully: stay in CP_PENDING, cp freed later. */
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): halt on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): clear on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}
	/*
	 * NOTE(review): if no fctl bit is set we fall through to err_out
	 * with ret_code left as whatever userspace last wrote — presumably
	 * intentional best-effort, but worth confirming.
	 */

err_out:
	private->state = VFIO_CCW_STATE_IDLE;
	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
				      io_region->ret_code, errstr);
}
324 
325 /*
326  * Deal with an async request from userspace.
327  */
328 static void fsm_async_request(struct vfio_ccw_private *private,
329 			      enum vfio_ccw_event event)
330 {
331 	struct ccw_cmd_region *cmd_region = private->cmd_region;
332 
333 	switch (cmd_region->command) {
334 	case VFIO_CCW_ASYNC_CMD_HSCH:
335 		cmd_region->ret_code = fsm_do_halt(private);
336 		break;
337 	case VFIO_CCW_ASYNC_CMD_CSCH:
338 		cmd_region->ret_code = fsm_do_clear(private);
339 		break;
340 	default:
341 		/* should not happen? */
342 		cmd_region->ret_code = -EINVAL;
343 	}
344 
345 	trace_vfio_ccw_fsm_async_request(get_schid(private),
346 					 cmd_region->command,
347 					 cmd_region->ret_code);
348 }
349 
/*
 * Got an interrupt for a normal io (state busy).
 *
 * Snapshots the per-cpu IRB into the private area and defers the actual
 * processing to the vfio_ccw workqueue; anyone waiting on
 * private->completion is woken as well.
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	VFIO_CCW_TRACE_EVENT(6, "IRQ");
	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));

	/* Copy before the per-cpu IRB can be overwritten by the next IRQ. */
	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	if (private->completion)
		complete(private->completion);
}
368 
/*
 * Device statemachine
 *
 * Maps (current state, incoming event) to the action to run.  Every event
 * slot is populated for every state, so no NULL checks are needed at the
 * dispatch site.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	/* Subchannel gone or never usable: reject everything. */
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	/* Device known but not opened by userspace yet. */
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Ready for a new request from userspace. */
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Channel program being translated: ask callers to retry. */
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* I/O started on the hardware: busy for I/O, async still allowed. */
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};
404