xref: /linux/tools/testing/cxl/test/cxl.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/genalloc.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/acpi.h>
9 #include <linux/pci.h>
10 #include <linux/mm.h>
11 #include <cxlmem.h>
12 #include "mock.h"
13 
14 #define NR_CXL_HOST_BRIDGES 2
15 #define NR_CXL_ROOT_PORTS 2
16 #define NR_CXL_SWITCH_PORTS 2
17 #define NR_CXL_PORT_DECODERS 2
18 
/* Root "ACPI0017" platform device that anchors the mock CXL topology. */
static struct platform_device *cxl_acpi;
/* One mock host bridge per CHBS entry in mock_cedt. */
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
/* Root ports: NR_CXL_ROOT_PORTS children under each host bridge. */
static struct platform_device
	*cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
/* One switch upstream port per root port (see BUILD_BUG_ON in init). */
static struct platform_device
	*cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
/* Switch downstream ports: NR_CXL_SWITCH_PORTS per upstream port. */
static struct platform_device
	*cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS *
			  NR_CXL_SWITCH_PORTS];
/*
 * One mock memdev per switch dport.
 * NOTE(review): not 'static' unlike its siblings — presumably shared with
 * another cxl_test translation unit; confirm before narrowing linkage.
 */
struct platform_device
	*cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS];

/* Mock ACPI companion for the cxl_acpi platform device. */
static struct acpi_device acpi0017_mock;
/*
 * Mock ACPI companions for the host bridges; each entry's handle points
 * at itself so find_host_bridge() can match on it.
 */
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
};
40 
41 static bool is_mock_dev(struct device *dev)
42 {
43 	int i;
44 
45 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
46 		if (dev == &cxl_mem[i]->dev)
47 			return true;
48 	if (dev == &cxl_acpi->dev)
49 		return true;
50 	return false;
51 }
52 
53 static bool is_mock_adev(struct acpi_device *adev)
54 {
55 	int i;
56 
57 	if (adev == &acpi0017_mock)
58 		return true;
59 
60 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
61 		if (adev == &host_bridge[i])
62 			return true;
63 
64 	return false;
65 }
66 
/*
 * Hand-built CEDT: two CHBS entries (one per mock host bridge) followed
 * by four CFMWS windows. Windows 0/1 are volatile (1-way then 2-way
 * interleave), windows 2/3 repeat the pattern for pmem. The struct is
 * __packed so the sub-tables are contiguous, as ACPI table walkers
 * expect; base addresses are filled in at runtime by populate_cedt().
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	/* single-target volatile window backed by host bridge 0 */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M,
		},
		.target = { 0 },
	},
	/* volatile window interleaved across both host bridges */
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 2,
		},
		.target = { 0, 1, },
	},
	/* single-target pmem window backed by host bridge 0 */
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M,
		},
		.target = { 0 },
	},
	/* pmem window interleaved across both host bridges */
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 2,
		},
		.target = { 0, 1, },
	},
};
171 
/*
 * Flat view of the four CFMWS sub-tables above, for iteration in
 * populate_cedt() and mock_acpi_table_parse_cedt().
 */
struct acpi_cedt_cfmws *mock_cfmws[4] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
};
178 
/* One span of mock physical address space handed out by alloc_mock_res(). */
struct cxl_mock_res {
	struct list_head list;	/* membership in mock_res, under mock_res_lock */
	struct range range;	/* [start, end] of the allocated span */
};

/* All outstanding allocations, so they can be torn down in one pass. */
static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
/* Backing pool of fake physical address space (populated in cxl_test_init). */
static struct gen_pool *cxl_mock_pool;
187 
188 static void depopulate_all_mock_resources(void)
189 {
190 	struct cxl_mock_res *res, *_res;
191 
192 	mutex_lock(&mock_res_lock);
193 	list_for_each_entry_safe(res, _res, &mock_res, list) {
194 		gen_pool_free(cxl_mock_pool, res->range.start,
195 			      range_len(&res->range));
196 		list_del(&res->list);
197 		kfree(res);
198 	}
199 	mutex_unlock(&mock_res_lock);
200 }
201 
202 static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
203 {
204 	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
205 	struct genpool_data_align data = {
206 		.align = SZ_256M,
207 	};
208 	unsigned long phys;
209 
210 	INIT_LIST_HEAD(&res->list);
211 	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
212 				   gen_pool_first_fit_align, &data);
213 	if (!phys)
214 		return NULL;
215 
216 	res->range = (struct range) {
217 		.start = phys,
218 		.end = phys + size - 1,
219 	};
220 	mutex_lock(&mock_res_lock);
221 	list_add(&res->list, &mock_res);
222 	mutex_unlock(&mock_res_lock);
223 
224 	return res;
225 }
226 
227 static int populate_cedt(void)
228 {
229 	struct cxl_mock_res *res;
230 	int i;
231 
232 	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
233 		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
234 		resource_size_t size;
235 
236 		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
237 			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
238 		else
239 			size = ACPI_CEDT_CHBS_LENGTH_CXL11;
240 
241 		res = alloc_mock_res(size);
242 		if (!res)
243 			return -ENOMEM;
244 		chbs->base = res->range.start;
245 		chbs->length = size;
246 	}
247 
248 	for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
249 		struct acpi_cedt_cfmws *window = mock_cfmws[i];
250 
251 		res = alloc_mock_res(window->window_size);
252 		if (!res)
253 			return -ENOMEM;
254 		window->base_hpa = res->range.start;
255 	}
256 
257 	return 0;
258 }
259 
/*
 * WARNING, this hack assumes the format of 'struct
 * cxl_cfmws_context' and 'struct cxl_chbs_context' share the property that
 * the first struct member is the device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	/* device being probed; aliases the first member of either context */
	struct device *dev;
};
269 
/*
 * Replacement for acpi_table_parse_cedt(): when the probing device is the
 * mock cxl_acpi platform device, walk the hand-built mock_cedt sub-tables
 * instead of a real firmware table; otherwise defer to the real parser.
 *
 * Relies on the cxl_cedt_context hack above to extract the device from
 * the handler's opaque @arg.
 */
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (dev != &cxl_acpi->dev)
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			/*
			 * chbs[] entries are contiguous, so "one past this
			 * entry" is the end pointer the handler expects
			 * (valid even for the last entry: one-past-the-end).
			 */
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			/* CFMWS length includes the trailing target list */
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}
299 
300 static bool is_mock_bridge(struct device *dev)
301 {
302 	int i;
303 
304 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
305 		if (dev == &cxl_host_bridge[i]->dev)
306 			return true;
307 	return false;
308 }
309 
310 static bool is_mock_port(struct device *dev)
311 {
312 	int i;
313 
314 	if (is_mock_bridge(dev))
315 		return true;
316 
317 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
318 		if (dev == &cxl_root_port[i]->dev)
319 			return true;
320 
321 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
322 		if (dev == &cxl_switch_uport[i]->dev)
323 			return true;
324 
325 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
326 		if (dev == &cxl_switch_dport[i]->dev)
327 			return true;
328 
329 	if (is_cxl_memdev(dev))
330 		return is_mock_dev(dev->parent);
331 
332 	return false;
333 }
334 
/*
 * Index of @adev within the host_bridge[] array, doubling as its ACPI
 * _UID. Only valid for pointers into host_bridge[] (pointer subtraction
 * outside the array would be undefined).
 */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
339 
340 static struct acpi_device *find_host_bridge(acpi_handle handle)
341 {
342 	int i;
343 
344 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
345 		if (handle == host_bridge[i].handle)
346 			return &host_bridge[i];
347 	return NULL;
348 }
349 
350 static acpi_status
351 mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
352 			   struct acpi_object_list *arguments,
353 			   unsigned long long *data)
354 {
355 	struct acpi_device *adev = find_host_bridge(handle);
356 
357 	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
358 		return acpi_evaluate_integer(handle, pathname, arguments, data);
359 
360 	*data = host_bridge_index(adev);
361 	return AE_OK;
362 }
363 
/* Fake PCI buses and roots, one per host bridge, for is_mock_bus() and
 * mock_acpi_pci_find_root() to hand back. */
static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES];
static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
};
373 
374 static bool is_mock_bus(struct pci_bus *bus)
375 {
376 	int i;
377 
378 	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
379 		if (bus == &mock_pci_bus[i])
380 			return true;
381 	return false;
382 }
383 
384 static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
385 {
386 	struct acpi_device *adev = find_host_bridge(handle);
387 
388 	if (!adev)
389 		return acpi_pci_find_root(handle);
390 	return &mock_pci_root[host_bridge_index(adev)];
391 }
392 
393 static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
394 {
395 	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
396 
397 	if (!cxlhdm)
398 		return ERR_PTR(-ENOMEM);
399 
400 	cxlhdm->port = port;
401 	return cxlhdm;
402 }
403 
/*
 * Replacement for devm_cxl_add_passthrough_decoder(): the mock topology
 * always emulates HDM decoders, so reaching this path is a test bug.
 */
static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}
409 
410 
/* State threaded through map_targets() while building a decoder's
 * target list. */
struct target_map_ctx {
	int *target_map;	/* output: target ids, indexed by position */
	int index;		/* next slot to fill */
	int target_count;	/* capacity of target_map for this decoder */
};
416 
417 static int map_targets(struct device *dev, void *data)
418 {
419 	struct platform_device *pdev = to_platform_device(dev);
420 	struct target_map_ctx *ctx = data;
421 
422 	ctx->target_map[ctx->index++] = pdev->id;
423 
424 	if (ctx->index > ctx->target_count) {
425 		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
426 		return -ENXIO;
427 	}
428 
429 	return 0;
430 }
431 
/*
 * Replacement for devm_cxl_enumerate_decoders(): fabricate
 * NR_CXL_PORT_DECODERS decoders for the port behind @cxlhdm instead of
 * reading HDM decoder registers.
 *
 * Endpoint ports get target-less endpoint decoders; switch/root-child
 * ports get switch decoders whose target lists are populated from the
 * port's child platform devices via map_targets().
 *
 * Returns 0 on success or a negative errno; on failure the decoder that
 * could not be added is dropped via put_device() before returning.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count)
			cxld = cxl_switch_decoder_alloc(port, target_count);
		else
			cxld = cxl_endpoint_decoder_alloc(port);
		if (IS_ERR(cxld)) {
			dev_warn(&port->dev,
				 "Failed to allocate the decoder\n");
			return PTR_ERR(cxld);
		}

		/* empty range: decoder not programmed with an HPA window */
		cxld->decoder_range = (struct range) {
			.start = 0,
			.end = -1,
		};

		cxld->flags = CXL_DECODER_F_ENABLE;
		/* endpoint decoders (target_count == 0) are 1-way */
		cxld->interleave_ways = min_not_zero(target_count, 1);
		cxld->interleave_granularity = SZ_4K;
		cxld->target_type = CXL_DECODER_EXPANDER;

		if (target_count) {
			/* fill target_map from the uport's child devices */
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		/* from here the decoder is torn down with the port */
		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
498 
/*
 * Replacement for devm_cxl_port_enumerate_dports(): register a cxl_dport
 * for every mock platform device whose parent is @port's uport.
 *
 * Depth 1 ports scan the root-port array, depth 2 ports scan the switch
 * dport array; any other depth is unexpected in this fixed topology.
 *
 * Returns 0 on success or a negative errno.
 */
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		array_size = ARRAY_SIZE(cxl_root_port);
		array = cxl_root_port;
	} else if (port->depth == 2) {
		array_size = ARRAY_SIZE(cxl_switch_dport);
		array = cxl_switch_dport;
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		/* only children of this port's uport belong to it */
		if (pdev->dev.parent != port->uport)
			continue;

		/* no component registers to map in the mock topology */
		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport)) {
			dev_err(dev, "failed to add dport: %s (%ld)\n",
				dev_name(&pdev->dev), PTR_ERR(dport));
			return PTR_ERR(dport);
		}

		dev_dbg(dev, "add dport%d: %s\n", pdev->id,
			dev_name(&pdev->dev));
	}

	return 0;
}
539 
/*
 * Override table handed to the mock framework: identifies mock objects
 * and substitutes the ACPI/CXL entry points the cxl_acpi and cxl_port
 * drivers call during probe.
 */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
555 
/*
 * Wire up @adev as the ACPI companion of @dev: initialize the mock ACPI
 * device's embedded device/fwnode and cross-link the two, mimicking what
 * real firmware enumeration would have established.
 */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}
563 
/* Fallback size macros for architectures whose headers don't define them. */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif
571 
572 static struct platform_device *alloc_memdev(int id)
573 {
574 	struct resource res[] = {
575 		[0] = {
576 			.flags = IORESOURCE_MEM,
577 		},
578 		[1] = {
579 			.flags = IORESOURCE_MEM,
580 			.desc = IORES_DESC_PERSISTENT_MEMORY,
581 		},
582 	};
583 	struct platform_device *pdev;
584 	int i, rc;
585 
586 	for (i = 0; i < ARRAY_SIZE(res); i++) {
587 		struct cxl_mock_res *r = alloc_mock_res(SZ_256M);
588 
589 		if (!r)
590 			return NULL;
591 		res[i].start = r->range.start;
592 		res[i].end = r->range.end;
593 	}
594 
595 	pdev = platform_device_alloc("cxl_mem", id);
596 	if (!pdev)
597 		return NULL;
598 
599 	rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
600 	if (rc)
601 		goto err;
602 
603 	return pdev;
604 
605 err:
606 	platform_device_put(pdev);
607 	return NULL;
608 }
609 
610 static __init int cxl_test_init(void)
611 {
612 	int rc, i;
613 
614 	register_cxl_mock_ops(&cxl_mock_ops);
615 
616 	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
617 	if (!cxl_mock_pool) {
618 		rc = -ENOMEM;
619 		goto err_gen_pool_create;
620 	}
621 
622 	rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE);
623 	if (rc)
624 		goto err_gen_pool_add;
625 
626 	rc = populate_cedt();
627 	if (rc)
628 		goto err_populate;
629 
630 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
631 		struct acpi_device *adev = &host_bridge[i];
632 		struct platform_device *pdev;
633 
634 		pdev = platform_device_alloc("cxl_host_bridge", i);
635 		if (!pdev)
636 			goto err_bridge;
637 
638 		mock_companion(adev, &pdev->dev);
639 		rc = platform_device_add(pdev);
640 		if (rc) {
641 			platform_device_put(pdev);
642 			goto err_bridge;
643 		}
644 
645 		cxl_host_bridge[i] = pdev;
646 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
647 				       "physical_node");
648 		if (rc)
649 			goto err_bridge;
650 	}
651 
652 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
653 		struct platform_device *bridge =
654 			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
655 		struct platform_device *pdev;
656 
657 		pdev = platform_device_alloc("cxl_root_port", i);
658 		if (!pdev)
659 			goto err_port;
660 		pdev->dev.parent = &bridge->dev;
661 
662 		rc = platform_device_add(pdev);
663 		if (rc) {
664 			platform_device_put(pdev);
665 			goto err_port;
666 		}
667 		cxl_root_port[i] = pdev;
668 	}
669 
670 	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
671 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
672 		struct platform_device *root_port = cxl_root_port[i];
673 		struct platform_device *pdev;
674 
675 		pdev = platform_device_alloc("cxl_switch_uport", i);
676 		if (!pdev)
677 			goto err_port;
678 		pdev->dev.parent = &root_port->dev;
679 
680 		rc = platform_device_add(pdev);
681 		if (rc) {
682 			platform_device_put(pdev);
683 			goto err_uport;
684 		}
685 		cxl_switch_uport[i] = pdev;
686 	}
687 
688 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
689 		struct platform_device *uport =
690 			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
691 		struct platform_device *pdev;
692 
693 		pdev = platform_device_alloc("cxl_switch_dport", i);
694 		if (!pdev)
695 			goto err_port;
696 		pdev->dev.parent = &uport->dev;
697 
698 		rc = platform_device_add(pdev);
699 		if (rc) {
700 			platform_device_put(pdev);
701 			goto err_dport;
702 		}
703 		cxl_switch_dport[i] = pdev;
704 	}
705 
706 	BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport));
707 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
708 		struct platform_device *dport = cxl_switch_dport[i];
709 		struct platform_device *pdev;
710 
711 		pdev = alloc_memdev(i);
712 		if (!pdev)
713 			goto err_mem;
714 		pdev->dev.parent = &dport->dev;
715 		set_dev_node(&pdev->dev, i % 2);
716 
717 		rc = platform_device_add(pdev);
718 		if (rc) {
719 			platform_device_put(pdev);
720 			goto err_mem;
721 		}
722 		cxl_mem[i] = pdev;
723 	}
724 
725 	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
726 	if (!cxl_acpi)
727 		goto err_mem;
728 
729 	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
730 	acpi0017_mock.dev.bus = &platform_bus_type;
731 
732 	rc = platform_device_add(cxl_acpi);
733 	if (rc)
734 		goto err_add;
735 
736 	return 0;
737 
738 err_add:
739 	platform_device_put(cxl_acpi);
740 err_mem:
741 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
742 		platform_device_unregister(cxl_mem[i]);
743 err_dport:
744 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
745 		platform_device_unregister(cxl_switch_dport[i]);
746 err_uport:
747 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
748 		platform_device_unregister(cxl_switch_uport[i]);
749 err_port:
750 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
751 		platform_device_unregister(cxl_root_port[i]);
752 err_bridge:
753 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
754 		struct platform_device *pdev = cxl_host_bridge[i];
755 
756 		if (!pdev)
757 			continue;
758 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
759 		platform_device_unregister(cxl_host_bridge[i]);
760 	}
761 err_populate:
762 	depopulate_all_mock_resources();
763 err_gen_pool_add:
764 	gen_pool_destroy(cxl_mock_pool);
765 err_gen_pool_create:
766 	unregister_cxl_mock_ops(&cxl_mock_ops);
767 	return rc;
768 }
769 
770 static __exit void cxl_test_exit(void)
771 {
772 	int i;
773 
774 	platform_device_unregister(cxl_acpi);
775 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
776 		platform_device_unregister(cxl_mem[i]);
777 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
778 		platform_device_unregister(cxl_switch_dport[i]);
779 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
780 		platform_device_unregister(cxl_switch_uport[i]);
781 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
782 		platform_device_unregister(cxl_root_port[i]);
783 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
784 		struct platform_device *pdev = cxl_host_bridge[i];
785 
786 		if (!pdev)
787 			continue;
788 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
789 		platform_device_unregister(cxl_host_bridge[i]);
790 	}
791 	depopulate_all_mock_resources();
792 	gen_pool_destroy(cxl_mock_pool);
793 	unregister_cxl_mock_ops(&cxl_mock_ops);
794 }
795 
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
/* pulls in symbols exported under the ACPI and CXL namespaces */
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
801