// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

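/*
 * Call @fn for each subchannel ID, iterating over all subchannel sets
 * (0..max_ssid) and all subchannel numbers within each set. Iteration
 * stops early as soon as @fn returns a non-zero value, which is then
 * passed back to the caller.
 */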
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

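/*
 * Callback state for for_each_subchannel_staged(): @set tracks the
 * subchannel IDs that have not yet been visited as registered devices,
 * so that the second pass only evaluates unknown subchannels.
 */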
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

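/*
 * Iterate over all subchannels in two stages: first call @fn_known for
 * every subchannel registered on the css bus, then call @fn_unknown for
 * every remaining subchannel ID. If the idset needed for the bookkeeping
 * cannot be allocated, fall back to a brute-force scan of all subchannel
 * IDs via for_each_subchannel().
 *
 * A minimal, hypothetical caller, assuming callbacks my_known_fn() and
 * my_unknown_fn() with matching signatures:
 *
 *	rc = for_each_subchannel_staged(my_known_fn, my_unknown_fn, NULL);
 */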
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

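/*
 * Allocate and initialize the subchannel's interrupt lock and the mutex
 * that serializes (un)registration of the subchannel device.
 */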
static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

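/*
 * Allocate and minimally initialize a struct subchannel for @schid,
 * based on the subchannel information block @schib. Returns the new
 * subchannel or an ERR_PTR() value on failure.
 */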
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

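/*
 * Build a minimal chsc_ssd_info from the PMCW. This is the fallback
 * used when SSD data is not available via CHSC: the path mask and
 * channel-path IDs are taken from the subchannel's PMCW instead.
 */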
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

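/*
 * Writing a driver name to the driver_override attribute restricts the
 * subchannel to binding only against that driver (see css_bus_match());
 * writing an empty string clears the override again.
 */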
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

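/*
 * Make a freshly allocated subchannel known to the driver core and to
 * userspace. Uevents are initially suppressed; they are generated either
 * here (if no driver matched) or later by the subchannel driver once
 * device recognition has succeeded.
 */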
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

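/*
 * Evaluate a subchannel that has no device registered for it yet. On the
 * fast path (@slow == 0) evaluation is simply deferred to the slow path
 * by returning -EAGAIN; on the slow path the subchannel is probed if
 * store subchannel succeeds.
 */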
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

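/*
 * Slow-path evaluation: subchannel IDs that need (re-)evaluation are
 * collected in slow_subchannel_set and processed asynchronously by
 * css_slow_path_func(). css_eval_scheduled and css_eval_wq allow callers
 * to wait until all scheduled evaluations have completed.
 */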
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time for platforms with lots
		 * of known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/*
		 * Allow scheduling here since the containing loop might
		 * take a while.
		 */
		cond_resched();
	}
	return rc;
}

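/*
 * Work function for the slow path: evaluate all subchannels that were
 * marked in slow_subchannel_set and wake up any waiters once the set has
 * been drained.
 */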
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

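/* Remove a registered subchannel's ID from the idset passed as @data. */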
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

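/*
 * Build the global path-group ID from the extended CSSID (if multiple
 * channel subsystems are supported) or the CPU address, plus CPU ID,
 * machine type, and the high word of the TOD clock.
 */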
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

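/*
 * Allocate and register the channel subsystem device for index @nr,
 * together with its "defunct" pseudo subchannel, which serves as a
 * temporary parent for ccw devices that have lost their subchannel.
 */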
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

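/*
 * Create a gen_pool backed by coherent DMA memory for @dma_dev, with an
 * allocation granularity of 8 bytes (min_alloc_order 3), pre-populated
 * with up to @nr_pages pages. A partially populated pool is returned if
 * one of the page allocations fails.
 */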
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			 (void *) chunk->start_addr,
			 (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

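/*
 * Allocate zeroed DMA memory from @gp_dma. If the pool runs empty, it is
 * grown on demand by adding another coherent chunk (rounded up to whole
 * pages) before retrying the allocation.
 */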
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					 chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

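/*
 * Invoke a css driver's settle() callback, which waits until the driver
 * has finished its initial device handling.
 */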
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

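/*
 * Match a subchannel to a css driver by subchannel type, honoring a
 * driver_override set via sysfs (which restricts matching to the named
 * driver).
 */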
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets bus_type
 * in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);