// SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "ioasm.h"
#include "vfio_ccw_private.h"

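/*
 * Issue the "Start Subchannel" instruction for the channel program that
 * was previously translated into private->cp, and map the resulting
 * condition code to an errno for userspace.
 */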
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
	if (!orb) {
		ret = -EIO;
		goto out;
	}

	VFIO_CCW_TRACE_EVENT(5, "stIO");
	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		private->state = VFIO_CCW_STATE_CP_PENDING;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		/*
		 * Clear the failing path(s) from the usable path mask;
		 * if no path remains, the device itself is gone.
		 */
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;
		else
			sch->lpm = 0;

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		ret = ccode;
	}
out:
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}

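/*
 * Issue the "Halt Subchannel" instruction for this subchannel and map
 * the resulting condition code to an errno.
 */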
static int fsm_do_halt(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	unsigned long flags;
	int ccode;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "haltIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Halt Subchannel" */
	ccode = hsch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		ret = 0;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}

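/*
 * Issue the "Clear Subchannel" instruction for this subchannel and map
 * the resulting condition code to an errno.
 */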
static int fsm_do_clear(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	unsigned long flags;
	int ccode;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	VFIO_CCW_TRACE_EVENT(2, "clearIO");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/* Issue "Clear Subchannel" */
	ccode = csch(sch->schid);

	VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
		/* TODO: check what else we might need to clear */
		ret = 0;
		break;
	case 3:		/* Device not operational */
		ret = -ENODEV;
		break;
	default:
		ret = ccode;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}

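/*
 * The device has become not operational: schedule unregistration of the
 * subchannel and move the FSM to its final state.
 */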
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	VFIO_CCW_TRACE_EVENT(2, "notoper");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;
}

/*
 * No operation action.
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}

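/*
 * An I/O request arrived in a state that cannot accept one; report the
 * offending state and fail the request with -EIO.
 */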
static void fsm_io_error(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
	private->io_region->ret_code = -EIO;
}

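/*
 * A channel program has already been started and is awaiting its
 * interrupt (CP_PENDING); reject further I/O requests with -EBUSY.
 */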
static void fsm_io_busy(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EBUSY;
}

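/*
 * A channel program is still being translated (CP_PROCESSING); ask
 * userspace to retry the I/O request with -EAGAIN.
 */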
static void fsm_io_retry(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EAGAIN;
}

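/*
 * An async (halt/clear) request arrived in a state that cannot accept
 * one; report the offending state and fail the request with -EIO.
 */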
static void fsm_async_error(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;

	pr_err("vfio-ccw: FSM: %s request from state:%d\n",
	       cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
	       cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
	       "<unknown>", private->state);
	cmd_region->ret_code = -EIO;
}

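/*
 * The channel program is still being translated (CP_PROCESSING); ask
 * userspace to retry the halt/clear request with -EAGAIN.
 */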
static void fsm_async_retry(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	private->cmd_region->ret_code = -EAGAIN;
}

static void fsm_disabled_irq(struct vfio_ccw_private *private,
			     enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
	return p->sch->schid;
}

/*
 * Deal with the ccw command request from userspace.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	struct mdev_device *mdev = private->mdev;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

	private->state = VFIO_CCW_STATE_CP_PROCESSING;
	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): transport mode\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
		}
		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
					      orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_init=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): halt on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): clear on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

err_out:
	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
				      io_region->ret_code, errstr);
}

/*
 * Deal with an async request from userspace.
 */
static void fsm_async_request(struct vfio_ccw_private *private,
			      enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;

	switch (cmd_region->command) {
	case VFIO_CCW_ASYNC_CMD_HSCH:
		cmd_region->ret_code = fsm_do_halt(private);
		break;
	case VFIO_CCW_ASYNC_CMD_CSCH:
		cmd_region->ret_code = fsm_do_clear(private);
		break;
	default:
		/* should not happen? */
		cmd_region->ret_code = -EINVAL;
	}

	trace_vfio_ccw_fsm_async_request(get_schid(private),
					 cmd_region->command,
					 cmd_region->ret_code);
}

/*
 * Got an interrupt for a normal I/O (state busy).
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	VFIO_CCW_TRACE_EVENT(6, "IRQ");
	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));

	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	if (private->completion)
		complete(private->completion);
}

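/*
 * Events are dispatched through the two-dimensional jumptable below,
 * indexed by the current state and the incoming event. A minimal sketch
 * of the dispatch, assuming the VFIO_CCW_FSM_EVENT() helper declared in
 * vfio_ccw_private.h:
 *
 *	#define VFIO_CCW_FSM_EVENT(private, event) \
 *		vfio_ccw_jumptable[(private)->state][(event)]((private), (event))
 *
 * For example, an I/O interrupt while a channel program is pending
 * (VFIO_CCW_STATE_CP_PENDING) ends up in fsm_irq(), while the same
 * interrupt in VFIO_CCW_STATE_NOT_OPER only triggers fsm_disabled_irq().
 */
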
/*
 * Device state machine
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};