xref: /linux/drivers/cxl/core/hdm.c (revision 6c8c1406)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/io-64-nonatomic-hi-lo.h>
4 #include <linux/seq_file.h>
5 #include <linux/device.h>
6 #include <linux/delay.h>
7 
8 #include "cxlmem.h"
9 #include "core.h"
10 
11 /**
12  * DOC: cxl core hdm
13  *
14  * Compute Express Link Host Managed Device Memory, starting with the
15  * CXL 2.0 specification, is managed by an array of HDM Decoder register
16  * instances per CXL port and per CXL endpoint. Define common helpers
17  * for enumerating these registers and capabilities.
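 *
 * A typical port driver flow (see drivers/cxl/port.c) is roughly:
 *
 *	cxlhdm = devm_cxl_setup_hdm(port);
 *	if (IS_ERR(cxlhdm))
 *		return PTR_ERR(cxlhdm);
 *	return devm_cxl_enumerate_decoders(cxlhdm);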
18  */
19 
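/*
 * Serializes device-physical-address (DPA) allocation and release against
 * readers of the cxlds->dpa_res tree (debug dumps, decoder commit / reset).
 */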
20 DECLARE_RWSEM(cxl_dpa_rwsem);
21 
22 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
23 			   int *target_map)
24 {
25 	int rc;
26 
27 	rc = cxl_decoder_add_locked(cxld, target_map);
28 	if (rc) {
29 		put_device(&cxld->dev);
30 		dev_err(&port->dev, "Failed to add decoder\n");
31 		return rc;
32 	}
33 
34 	rc = cxl_decoder_autoremove(&port->dev, cxld);
35 	if (rc)
36 		return rc;
37 
38 	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
39 
40 	return 0;
41 }
42 
43 /*
44  * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
45  * single-ported host bridges need not publish a decoder capability when a
46  * passthrough decode can be assumed, i.e. all transactions that the uport sees
47  * are claimed and passed to the single dport. Disable the range until the first
48  * CXL region is enumerated / activated.
49  */
50 int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
51 {
52 	struct cxl_switch_decoder *cxlsd;
53 	struct cxl_dport *dport = NULL;
54 	int single_port_map[1];
55 	unsigned long index;
56 
57 	cxlsd = cxl_switch_decoder_alloc(port, 1);
58 	if (IS_ERR(cxlsd))
59 		return PTR_ERR(cxlsd);
60 
61 	device_lock_assert(&port->dev);
62 
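	/* A passthrough port has a single dport; grab it */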
63 	xa_for_each(&port->dports, index, dport)
64 		break;
65 	single_port_map[0] = dport->port_id;
66 
67 	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
68 }
69 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
70 
71 static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
72 {
73 	u32 hdm_cap;
74 
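	/*
	 * Cache the global HDM Decoder Capability fields: decoder count,
	 * target count, and which extra interleave address bits are supported.
	 */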
75 	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
76 	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
77 	cxlhdm->target_count =
78 		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
79 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
80 		cxlhdm->interleave_mask |= GENMASK(11, 8);
81 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
82 		cxlhdm->interleave_mask |= GENMASK(14, 12);
83 }
84 
85 static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
86 					  void __iomem *crb)
87 {
88 	struct cxl_component_reg_map map;
89 
90 	cxl_probe_component_regs(&port->dev, crb, &map);
91 	if (!map.hdm_decoder.valid) {
92 		dev_err(&port->dev, "HDM decoder registers invalid\n");
93 		return IOMEM_ERR_PTR(-ENXIO);
94 	}
95 
96 	return crb + map.hdm_decoder.offset;
97 }
98 
99 /**
100  * devm_cxl_setup_hdm - map HDM decoder component registers
101  * @port: cxl_port to map
102  */
103 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
104 {
105 	struct device *dev = &port->dev;
106 	void __iomem *crb, *hdm;
107 	struct cxl_hdm *cxlhdm;
108 
109 	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
110 	if (!cxlhdm)
111 		return ERR_PTR(-ENOMEM);
112 
113 	cxlhdm->port = port;
114 	crb = devm_cxl_iomap_block(dev, port->component_reg_phys,
115 				   CXL_COMPONENT_REG_BLOCK_SIZE);
116 	if (!crb) {
117 		dev_err(dev, "No component registers mapped\n");
118 		return ERR_PTR(-ENXIO);
119 	}
120 
121 	hdm = map_hdm_decoder_regs(port, crb);
122 	if (IS_ERR(hdm))
123 		return ERR_CAST(hdm);
124 	cxlhdm->regs.hdm_decoder = hdm;
125 
126 	parse_hdm_decoder_caps(cxlhdm);
127 	if (cxlhdm->decoder_count == 0) {
128 		dev_err(dev, "Spec violation. Caps invalid\n");
129 		return ERR_PTR(-ENXIO);
130 	}
131 
132 	dev_set_drvdata(dev, cxlhdm);
133 
134 	return cxlhdm;
135 }
136 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
137 
138 static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
139 {
140 	unsigned long long start = r->start, end = r->end;
141 
142 	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
143 		   r->name);
144 }
145 
146 void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
147 {
148 	struct resource *p1, *p2;
149 
150 	down_read(&cxl_dpa_rwsem);
151 	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
152 		__cxl_dpa_debug(file, p1, 0);
153 		for (p2 = p1->child; p2; p2 = p2->sibling)
154 			__cxl_dpa_debug(file, p2, 1);
155 	}
156 	up_read(&cxl_dpa_rwsem);
157 }
158 EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
159 
160 /*
161  * Must be called in a context that synchronizes against this decoder's
162  * port ->remove() callback (like an endpoint decoder sysfs attribute)
163  */
164 static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
165 {
166 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
167 	struct cxl_port *port = cxled_to_port(cxled);
168 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
169 	struct resource *res = cxled->dpa_res;
170 	resource_size_t skip_start;
171 
172 	lockdep_assert_held_write(&cxl_dpa_rwsem);
173 
174 	/* save @skip_start, before @res is released */
175 	skip_start = res->start - cxled->skip;
176 	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
177 	if (cxled->skip)
178 		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
179 	cxled->skip = 0;
180 	cxled->dpa_res = NULL;
181 	put_device(&cxled->cxld.dev);
182 	port->hdm_end--;
183 }
184 
185 static void cxl_dpa_release(void *cxled)
186 {
187 	down_write(&cxl_dpa_rwsem);
188 	__cxl_dpa_release(cxled);
189 	up_write(&cxl_dpa_rwsem);
190 }
191 
192 /*
193  * Must be called from context that will not race port device
194  * unregistration, like decoder sysfs attribute methods
195  */
196 static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
197 {
198 	struct cxl_port *port = cxled_to_port(cxled);
199 
200 	lockdep_assert_held_write(&cxl_dpa_rwsem);
201 	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
202 	__cxl_dpa_release(cxled);
203 }
204 
205 static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
206 			     resource_size_t base, resource_size_t len,
207 			     resource_size_t skipped)
208 {
209 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
210 	struct cxl_port *port = cxled_to_port(cxled);
211 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
212 	struct device *dev = &port->dev;
213 	struct resource *res;
214 
215 	lockdep_assert_held_write(&cxl_dpa_rwsem);
216 
217 	if (!len)
218 		goto success;
219 
220 	if (cxled->dpa_res) {
221 		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
222 			port->id, cxled->cxld.id, cxled->dpa_res);
223 		return -EBUSY;
224 	}
225 
226 	if (port->hdm_end + 1 != cxled->cxld.id) {
227 		/*
228 		 * Assumes alloc and commit order is always in hardware-instance
229 		 * order, per the expectations of 8.2.5.12.20 Committing Decoder
230 		 * Programming, which requires decoder[m] to be committed before
231 		 * decoder[m+1] can start committing.
232 		 */
233 		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
234 			cxled->cxld.id, port->id, port->hdm_end + 1);
235 		return -EBUSY;
236 	}
237 
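	/*
	 * Reserve the skipped DPA just below @base, i.e. [base - skipped, base),
	 * before claiming the allocation itself.
	 */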
238 	if (skipped) {
239 		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
240 				       dev_name(&cxled->cxld.dev), 0);
241 		if (!res) {
242 			dev_dbg(dev,
243 				"decoder%d.%d: failed to reserve skipped space\n",
244 				port->id, cxled->cxld.id);
245 			return -EBUSY;
246 		}
247 	}
248 	res = __request_region(&cxlds->dpa_res, base, len,
249 			       dev_name(&cxled->cxld.dev), 0);
250 	if (!res) {
251 		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
252 			port->id, cxled->cxld.id);
253 		if (skipped)
254 			__release_region(&cxlds->dpa_res, base - skipped,
255 					 skipped);
256 		return -EBUSY;
257 	}
258 	cxled->dpa_res = res;
259 	cxled->skip = skipped;
260 
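	/* Note which partition (ram vs pmem) fully contains the allocation */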
261 	if (resource_contains(&cxlds->pmem_res, res))
262 		cxled->mode = CXL_DECODER_PMEM;
263 	else if (resource_contains(&cxlds->ram_res, res))
264 		cxled->mode = CXL_DECODER_RAM;
265 	else {
266 		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
267 			cxled->cxld.id, cxled->dpa_res);
268 		cxled->mode = CXL_DECODER_MIXED;
269 	}
270 
271 success:
272 	port->hdm_end++;
273 	get_device(&cxled->cxld.dev);
274 	return 0;
275 }
276 
277 static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
278 				resource_size_t base, resource_size_t len,
279 				resource_size_t skipped)
280 {
281 	struct cxl_port *port = cxled_to_port(cxled);
282 	int rc;
283 
284 	down_write(&cxl_dpa_rwsem);
285 	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
286 	up_write(&cxl_dpa_rwsem);
287 
288 	if (rc)
289 		return rc;
290 
291 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
292 }
293 
294 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
295 {
296 	resource_size_t size = 0;
297 
298 	down_read(&cxl_dpa_rwsem);
299 	if (cxled->dpa_res)
300 		size = resource_size(cxled->dpa_res);
301 	up_read(&cxl_dpa_rwsem);
302 
303 	return size;
304 }
305 
306 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
307 {
308 	resource_size_t base = -1;
309 
310 	down_read(&cxl_dpa_rwsem);
311 	if (cxled->dpa_res)
312 		base = cxled->dpa_res->start;
313 	up_read(&cxl_dpa_rwsem);
314 
315 	return base;
316 }
317 
318 int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
319 {
320 	struct cxl_port *port = cxled_to_port(cxled);
321 	struct device *dev = &cxled->cxld.dev;
322 	int rc;
323 
324 	down_write(&cxl_dpa_rwsem);
325 	if (!cxled->dpa_res) {
326 		rc = 0;
327 		goto out;
328 	}
329 	if (cxled->cxld.region) {
330 		dev_dbg(dev, "decoder assigned to: %s\n",
331 			dev_name(&cxled->cxld.region->dev));
332 		rc = -EBUSY;
333 		goto out;
334 	}
335 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
336 		dev_dbg(dev, "decoder enabled\n");
337 		rc = -EBUSY;
338 		goto out;
339 	}
340 	if (cxled->cxld.id != port->hdm_end) {
341 		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
342 			port->hdm_end);
343 		rc = -EBUSY;
344 		goto out;
345 	}
346 	devm_cxl_dpa_release(cxled);
347 	rc = 0;
348 out:
349 	up_write(&cxl_dpa_rwsem);
350 	return rc;
351 }
352 
353 int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
354 		     enum cxl_decoder_mode mode)
355 {
356 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
357 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
358 	struct device *dev = &cxled->cxld.dev;
359 	int rc;
360 
361 	switch (mode) {
362 	case CXL_DECODER_RAM:
363 	case CXL_DECODER_PMEM:
364 		break;
365 	default:
366 		dev_dbg(dev, "unsupported mode: %d\n", mode);
367 		return -EINVAL;
368 	}
369 
370 	down_write(&cxl_dpa_rwsem);
371 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
372 		rc = -EBUSY;
373 		goto out;
374 	}
375 
376 	/*
377 	 * Only allow modes that are supported by the current partition
378 	 * configuration
379 	 */
380 	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
381 		dev_dbg(dev, "no available pmem capacity\n");
382 		rc = -ENXIO;
383 		goto out;
384 	}
385 	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
386 		dev_dbg(dev, "no available ram capacity\n");
387 		rc = -ENXIO;
388 		goto out;
389 	}
390 
391 	cxled->mode = mode;
392 	rc = 0;
393 out:
394 	up_write(&cxl_dpa_rwsem);
395 
396 	return rc;
397 }
398 
399 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
400 {
401 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
402 	resource_size_t free_ram_start, free_pmem_start;
403 	struct cxl_port *port = cxled_to_port(cxled);
404 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
405 	struct device *dev = &cxled->cxld.dev;
406 	resource_size_t start, avail, skip;
407 	struct resource *p, *last;
408 	int rc;
409 
410 	down_write(&cxl_dpa_rwsem);
411 	if (cxled->cxld.region) {
412 		dev_dbg(dev, "decoder attached to %s\n",
413 			dev_name(&cxled->cxld.region->dev));
414 		rc = -EBUSY;
415 		goto out;
416 	}
417 
418 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
419 		dev_dbg(dev, "decoder enabled\n");
420 		rc = -EBUSY;
421 		goto out;
422 	}
423 
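	/*
	 * Find the last allocation in each partition; free space for a new
	 * allocation starts immediately after it.
	 */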
424 	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
425 		last = p;
426 	if (last)
427 		free_ram_start = last->end + 1;
428 	else
429 		free_ram_start = cxlds->ram_res.start;
430 
431 	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
432 		last = p;
433 	if (last)
434 		free_pmem_start = last->end + 1;
435 	else
436 		free_pmem_start = cxlds->pmem_res.start;
437 
438 	if (cxled->mode == CXL_DECODER_RAM) {
439 		start = free_ram_start;
440 		avail = cxlds->ram_res.end - start + 1;
441 		skip = 0;
442 	} else if (cxled->mode == CXL_DECODER_PMEM) {
443 		resource_size_t skip_start, skip_end;
444 
445 		start = free_pmem_start;
446 		avail = cxlds->pmem_res.end - start + 1;
447 		skip_start = free_ram_start;
448 
449 		/*
450 		 * If some pmem is already allocated, then that allocation
451 		 * already handled the skip.
452 		 */
453 		if (cxlds->pmem_res.child &&
454 		    skip_start == cxlds->pmem_res.child->start)
455 			skip_end = skip_start - 1;
456 		else
457 			skip_end = start - 1;
458 		skip = skip_end - skip_start + 1;
459 	} else {
460 		dev_dbg(dev, "mode not set\n");
461 		rc = -EINVAL;
462 		goto out;
463 	}
464 
465 	if (size > avail) {
466 		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
467 			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
468 			&avail);
469 		rc = -ENOSPC;
470 		goto out;
471 	}
472 
473 	rc = __cxl_dpa_reserve(cxled, start, size, skip);
474 out:
475 	up_write(&cxl_dpa_rwsem);
476 
477 	if (rc)
478 		return rc;
479 
480 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
481 }
482 
483 static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
484 {
485 	u16 eig;
486 	u8 eiw;
487 
488 	/*
489 	 * Input validation ensures these WARNs never fire, but otherwise
490 	 * suppress uninitialized variable usage warnings.
491 	 */
492 	if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
493 		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
494 		return;
495 	if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
496 		      "invalid interleave_granularity: %d\n",
497 		      cxld->interleave_granularity))
498 		return;
499 
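	/* Program the spec-encoded granularity / ways and arm the commit bit */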
500 	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
501 	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
502 	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
503 }
504 
505 static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
506 {
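	/* Set the type bit for expander (type-3) targets, clear for accelerators */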
507 	u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
508 			  CXL_HDM_DECODER0_CTRL_TYPE);
509 }
510 
511 static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
512 {
513 	struct cxl_dport **t = &cxlsd->target[0];
514 	int ways = cxlsd->cxld.interleave_ways;
515 
516 	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
517 			  ways > 8 || ways > cxlsd->nr_targets,
518 			  "ways: %d overflows targets: %d\n", ways,
519 			  cxlsd->nr_targets))
520 		return -ENXIO;
521 
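	/*
	 * Pack one dport port_id per interleave way into the 64-bit target
	 * list, 8 bits per entry: way0 in bits[7:0], way1 in bits[15:8], ...
	 */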
522 	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
523 	if (ways > 1)
524 		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
525 	if (ways > 2)
526 		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
527 	if (ways > 3)
528 		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
529 	if (ways > 4)
530 		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
531 	if (ways > 5)
532 		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
533 	if (ways > 6)
534 		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
535 	if (ways > 7)
536 		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
537 
538 	return 0;
539 }
540 
541 /*
542  * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
543  * the committed or error bit within 10ms. Be generous and allow 20ms to
544  * account for clock skew and other marginal behavior.
545  */
546 #define COMMIT_TIMEOUT_MS 20
547 static int cxld_await_commit(void __iomem *hdm, int id)
548 {
549 	u32 ctrl;
550 	int i;
551 
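	/*
	 * Poll roughly once per millisecond up to the timeout; a set error bit
	 * means the commit was rejected, so withdraw the commit request and fail.
	 */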
552 	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
553 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
554 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
555 			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
556 			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
557 			return -EIO;
558 		}
559 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
560 			return 0;
561 		fsleep(1000);
562 	}
563 
564 	return -ETIMEDOUT;
565 }
566 
567 static int cxl_decoder_commit(struct cxl_decoder *cxld)
568 {
569 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
570 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
571 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
572 	int id = cxld->id, rc;
573 	u64 base, size;
574 	u32 ctrl;
575 
576 	if (cxld->flags & CXL_DECODER_F_ENABLE)
577 		return 0;
578 
579 	if (port->commit_end + 1 != id) {
580 		dev_dbg(&port->dev,
581 			"%s: out of order commit, expected decoder%d.%d\n",
582 			dev_name(&cxld->dev), port->id, port->commit_end + 1);
583 		return -EBUSY;
584 	}
585 
586 	down_read(&cxl_dpa_rwsem);
587 	/* common decoder settings */
588 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
589 	cxld_set_interleave(cxld, &ctrl);
590 	cxld_set_type(cxld, &ctrl);
591 	base = cxld->hpa_range.start;
592 	size = range_len(&cxld->hpa_range);
593 
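	/* Program the HPA base and size as hi/lo 32-bit register pairs */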
594 	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
595 	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
596 	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
597 	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
598 
599 	if (is_switch_decoder(&cxld->dev)) {
600 		struct cxl_switch_decoder *cxlsd =
601 			to_cxl_switch_decoder(&cxld->dev);
602 		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
603 		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
604 		u64 targets;
605 
606 		rc = cxlsd_set_targets(cxlsd, &targets);
607 		if (rc) {
608 			dev_dbg(&port->dev, "%s: target configuration error\n",
609 				dev_name(&cxld->dev));
			/* drop cxl_dpa_rwsem before taking the common error exit */
			up_read(&cxl_dpa_rwsem);
610 			goto err;
611 		}
612 
613 		writel(upper_32_bits(targets), tl_hi);
614 		writel(lower_32_bits(targets), tl_lo);
615 	} else {
616 		struct cxl_endpoint_decoder *cxled =
617 			to_cxl_endpoint_decoder(&cxld->dev);
618 		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
619 		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
620 
621 		writel(upper_32_bits(cxled->skip), sk_hi);
622 		writel(lower_32_bits(cxled->skip), sk_lo);
623 	}
624 
625 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
626 	up_read(&cxl_dpa_rwsem);
627 
628 	port->commit_end++;
629 	rc = cxld_await_commit(hdm, cxld->id);
630 err:
631 	if (rc) {
632 		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
633 			dev_name(&cxld->dev), rc);
634 		cxld->reset(cxld);
635 		return rc;
636 	}
637 	cxld->flags |= CXL_DECODER_F_ENABLE;
638 
639 	return 0;
640 }
641 
642 static int cxl_decoder_reset(struct cxl_decoder *cxld)
643 {
644 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
645 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
646 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
647 	int id = cxld->id;
648 	u32 ctrl;
649 
650 	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
651 		return 0;
652 
653 	if (port->commit_end != id) {
654 		dev_dbg(&port->dev,
655 			"%s: out of order reset, expected decoder%d.%d\n",
656 			dev_name(&cxld->dev), port->id, port->commit_end);
657 		return -EBUSY;
658 	}
659 
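	/* Withdraw the commit request, then zero out the decode range */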
660 	down_read(&cxl_dpa_rwsem);
661 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
662 	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
663 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
664 
665 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
666 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
667 	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
668 	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
669 	up_read(&cxl_dpa_rwsem);
670 
671 	port->commit_end--;
672 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
673 
674 	return 0;
675 }
676 
677 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
678 			    int *target_map, void __iomem *hdm, int which,
679 			    u64 *dpa_base)
680 {
681 	struct cxl_endpoint_decoder *cxled = NULL;
682 	u64 size, base, skip, dpa_size;
683 	bool committed;
684 	u32 remainder;
685 	int i, rc;
686 	u32 ctrl;
687 	union {
688 		u64 value;
689 		unsigned char target_id[8];
690 	} target_list;
691 
692 	if (is_endpoint_decoder(&cxld->dev))
693 		cxled = to_cxl_endpoint_decoder(&cxld->dev);
694 
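	/* Snapshot the decoder's current hardware state: control, base, size */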
695 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
696 	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
697 	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
698 	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
699 	cxld->commit = cxl_decoder_commit;
700 	cxld->reset = cxl_decoder_reset;
701 
702 	if (!committed)
703 		size = 0;
704 	if (base == U64_MAX || size == U64_MAX) {
705 		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
706 			 port->id, cxld->id);
707 		return -ENXIO;
708 	}
709 
710 	cxld->hpa_range = (struct range) {
711 		.start = base,
712 		.end = base + size - 1,
713 	};
714 
715 	/* decoders are enabled if committed */
716 	if (committed) {
717 		cxld->flags |= CXL_DECODER_F_ENABLE;
718 		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
719 			cxld->flags |= CXL_DECODER_F_LOCK;
720 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
721 			cxld->target_type = CXL_DECODER_EXPANDER;
722 		else
723 			cxld->target_type = CXL_DECODER_ACCELERATOR;
724 		if (cxld->id != port->commit_end + 1) {
725 			dev_warn(&port->dev,
726 				 "decoder%d.%d: Committed out of order\n",
727 				 port->id, cxld->id);
728 			return -ENXIO;
729 		}
730 		port->commit_end = cxld->id;
731 	} else {
732 		/* unless / until type-2 drivers arrive, assume type-3 */
733 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
734 			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
735 			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
736 		}
737 		cxld->target_type = CXL_DECODER_EXPANDER;
738 	}
739 	rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
740 			 &cxld->interleave_ways);
741 	if (rc) {
742 		dev_warn(&port->dev,
743 			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
744 			 port->id, cxld->id, ctrl);
745 		return rc;
746 	}
747 	rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
748 				&cxld->interleave_granularity);
749 	if (rc)
750 		return rc;
751 
752 	if (!cxled) {
753 		target_list.value =
754 			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
755 		for (i = 0; i < cxld->interleave_ways; i++)
756 			target_map[i] = target_list.target_id[i];
757 
758 		return 0;
759 	}
760 
761 	if (!committed)
762 		return 0;
763 
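	/*
	 * A committed endpoint decoder contributes HPA-size / interleave-ways
	 * bytes of DPA; a non-zero remainder indicates a broken configuration.
	 */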
764 	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
765 	if (remainder) {
766 		dev_err(&port->dev,
767 			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
768 			port->id, cxld->id, size, cxld->interleave_ways);
769 		return -ENXIO;
770 	}
771 	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
772 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
773 	if (rc) {
774 		dev_err(&port->dev,
775 			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
776 			port->id, cxld->id, *dpa_base,
777 			*dpa_base + dpa_size + skip - 1, rc);
778 		return rc;
779 	}
780 	*dpa_base += dpa_size + skip;
781 	return 0;
782 }
783 
784 /**
785  * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
786  * @cxlhdm: HDM decoder register / capability context from devm_cxl_setup_hdm()
787  */
788 int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
789 {
790 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
791 	struct cxl_port *port = cxlhdm->port;
792 	int i, committed;
793 	u64 dpa_base = 0;
794 	u32 ctrl;
795 
796 	/*
797 	 * Since the register resource was recently claimed via request_region(),
798 	 * be careful about trusting the "not-committed" status until the commit
799 	 * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
800 	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
801 	 * host and target.
802 	 */
803 	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
804 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
805 		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
806 			committed++;
807 	}
808 
809 	/* ensure that future checks of committed can be trusted */
810 	if (committed != cxlhdm->decoder_count)
811 		msleep(20);
812 
813 	for (i = 0; i < cxlhdm->decoder_count; i++) {
814 		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
815 		int rc, target_count = cxlhdm->target_count;
816 		struct cxl_decoder *cxld;
817 
818 		if (is_cxl_endpoint(port)) {
819 			struct cxl_endpoint_decoder *cxled;
820 
821 			cxled = cxl_endpoint_decoder_alloc(port);
822 			if (IS_ERR(cxled)) {
823 				dev_warn(&port->dev,
824 					 "Failed to allocate the decoder\n");
825 				return PTR_ERR(cxled);
826 			}
827 			cxld = &cxled->cxld;
828 		} else {
829 			struct cxl_switch_decoder *cxlsd;
830 
831 			cxlsd = cxl_switch_decoder_alloc(port, target_count);
832 			if (IS_ERR(cxlsd)) {
833 				dev_warn(&port->dev,
834 					 "Failed to allocate the decoder\n");
835 				return PTR_ERR(cxlsd);
836 			}
837 			cxld = &cxlsd->cxld;
838 		}
839 
840 		rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
841 		if (rc) {
842 			put_device(&cxld->dev);
843 			return rc;
844 		}
845 		rc = add_hdm_decoder(port, cxld, target_map);
846 		if (rc) {
847 			dev_warn(&port->dev,
848 				 "Failed to add decoder to port\n");
849 			return rc;
850 		}
851 	}
852 
853 	return 0;
854 }
855 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
856