/* xref: /openbsd/sys/dev/pci/drm/i915/gt/intel_region_lmem.c (revision 72c7c57a) */
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"

#ifdef CONFIG_64BIT
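/*
 * Release the device's assigned standard PCI BAR resources so the LMEM
 * BAR can be resized.  The body is Linux-only; the OpenBSD build
 * currently just hits STUB().
 */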
static void _release_bars(struct pci_dev *pdev)
{
	STUB();
#ifdef notyet
	int resno;

	for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) {
		if (pci_resource_len(pdev, resno))
			pci_release_resource(pdev, resno);
	}
#endif
}

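/*
 * Ask the PCI core to resize one BAR to the requested size (converted to
 * a resizable-BAR size encoding) and log the outcome.  Linux-only;
 * stubbed on OpenBSD.
 */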
static void
_resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
{
	STUB();
#ifdef notyet
	struct pci_dev *pdev = i915->drm.pdev;
	int bar_size = pci_rebar_bytes_to_size(size);
	int ret;

	_release_bars(pdev);

	ret = pci_resize_resource(pdev, resno, bar_size);
	if (ret) {
		drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n",
			 resno, 1 << bar_size, ERR_PTR(ret));
		return;
	}

	drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
#endif
}

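/*
 * Resize the LMEM BAR to cover as much of local memory as possible, or to
 * the size requested through the lmem_bar_size module parameter.  The
 * resize is done with PCI memory decoding disabled and all forcewakes
 * held.  Linux-only; the OpenBSD build currently hits STUB().
 */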
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
{
	STUB();
#ifdef notyet
	struct pci_dev *pdev = i915->drm.pdev;
	struct pci_bus *root = pdev->bus;
	struct resource *root_res;
	resource_size_t rebar_size;
	resource_size_t current_size;
	intel_wakeref_t wakeref;
	u32 pci_cmd;
	int i;

	current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));

	if (i915->params.lmem_bar_size) {
		u32 bar_sizes;

		rebar_size = i915->params.lmem_bar_size *
			(resource_size_t)SZ_1M;
		bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);

		if (rebar_size == current_size)
			return;

		if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
		    rebar_size >= roundup_pow_of_two(lmem_size)) {
			rebar_size = lmem_size;

			drm_info(&i915->drm,
				 "Given bar size is not within supported size, setting it to default: %llu\n",
				 (u64)lmem_size >> 20);
		}
	} else {
		rebar_size = current_size;

		if (rebar_size != roundup_pow_of_two(lmem_size))
			rebar_size = lmem_size;
		else
			return;
	}

	/* Find out if root bus contains 64bit memory addressing */
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, root_res, i) {
		if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    root_res->start > 0x100000000ull)
			break;
	}

	/* pci_resize_resource will fail anyway */
	if (!root_res) {
		drm_info(&i915->drm, "Can't resize LMEM BAR - platform support is missing\n");
		return;
	}

	/*
	 * Forcewake release is asynchronous and can happen at any time;
	 * if it races with the BAR resize it results in later forcewake
	 * ack timeouts.  Grabbing all forcewakes prevents that.
	 */
	with_intel_runtime_pm(i915->uncore.rpm, wakeref) {
		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

		/* First disable PCI memory decoding references */
		pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
		pci_write_config_dword(pdev, PCI_COMMAND,
				       pci_cmd & ~PCI_COMMAND_MEMORY);

		_resize_bar(i915, GEN12_LMEM_BAR, rebar_size);

		pci_assign_unassigned_bus_resources(pdev->bus);
		pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	}
#endif
}
#else
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
#endif

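/*
 * Tear down the TTM resource manager that backs the LMEM region.  The
 * io_mapping teardown is Linux-only and compiled out on OpenBSD.
 */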
static int
region_lmem_release(struct intel_memory_region *mem)
{
	int ret;

	ret = intel_region_ttm_fini(mem);
	STUB();
#ifdef notyet
	io_mapping_fini(&mem->iomap);
#endif

	return ret;
}

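/*
 * Make the LMEM aperture CPU-accessible (io_mapping on Linux, UVM plus
 * bus_space on OpenBSD) and then hand the region over to TTM.
 */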
static int
region_lmem_init(struct intel_memory_region *mem)
{
	int ret;

#ifdef __linux__
	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				mem->io_size))
		return -EIO;
#else
	struct drm_i915_private *i915 = mem->i915;
	paddr_t start, end;
	struct vm_page *pgs;
	int i;
	bus_space_handle_t bsh;

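	/*
	 * Register the aperture pages with UVM as device memory and mark
	 * them write-combining for pmap.
	 */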
	start = atop(mem->io_start);
	end = start + atop(mem->io_size);
	uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);

	pgs = PHYS_TO_VM_PAGE(mem->io_start);
	for (i = 0; i < atop(mem->io_size); i++)
		atomic_setbits_int(&(pgs[i].pg_flags), PG_PMAP_WC);

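	/*
	 * Map the whole aperture linear and prefetchable, then use the
	 * mapping to emulate the Linux io_mapping the rest of the driver
	 * expects.
	 */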
	if (bus_space_map(i915->bst, mem->io_start, mem->io_size,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &bsh))
		panic("can't map lmem");

	mem->iomap.base = mem->io_start;
	mem->iomap.size = mem->io_size;
	mem->iomap.iomem = bus_space_vaddr(i915->bst, bsh);
#endif

	ret = intel_region_ttm_init(mem);
	if (ret)
		goto out_no_buddy;

	return 0;

out_no_buddy:
#ifdef __linux__
	io_mapping_fini(&mem->iomap);
#endif

	return ret;
}

static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};

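/*
 * DG1 has a legacy low-memory range (the first 1MiB of LMEM) that must be
 * kept out of the allocator; report it so the caller can reserve it.
 * Other platforms have no such range.
 */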
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
				     u64 *start, u32 *size)
{
	if (!IS_DG1(uncore->i915))
		return false;

	*start = 0;
	*size = SZ_1M;

	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
		*start, *start + *size);

	return true;
}

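/* Reserve the platform's legacy low-memory range, if it has one. */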
static int reserve_lowmem_region(struct intel_uncore *uncore,
				 struct intel_memory_region *mem)
{
	u64 reserve_start;
	u32 reserve_size;
	int ret;

	if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
		return 0;

	ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
	if (ret)
		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");

	return ret;
}

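/*
 * Probe the size of device-local memory, resize the LMEM BAR if possible,
 * and create the INTEL_MEMORY_LOCAL region that backs lmem objects.
 */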
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct pci_dev *pdev = i915->drm.pdev;
	struct intel_memory_region *mem;
	resource_size_t min_page_size;
	resource_size_t io_start;
	resource_size_t io_size;
	resource_size_t lmem_size;
	int err;

	if (!IS_DGFX(i915))
		return ERR_PTR(-ENODEV);

#ifdef notyet
	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);
#endif

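	/*
	 * On FLAT_CCS platforms the usable LMEM size is the tile address
	 * range minus the stolen portion above FLAT_CCS_BASE_ADDR; without
	 * FLAT_CCS, stolen memory starts at GSMBASE and everything below
	 * it is LMEM.
	 */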
	if (HAS_FLAT_CCS(i915)) {
		resource_size_t lmem_range;
		u64 tile_stolen, flat_ccs_base;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;

		flat_ccs_base = intel_gt_mcr_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
		flat_ccs_base = (flat_ccs_base >> XEHP_CCS_BASE_SHIFT) * SZ_64K;

		if (GEM_WARN_ON(lmem_size < flat_ccs_base))
			return ERR_PTR(-EIO);

		tile_stolen = lmem_size - flat_ccs_base;

		/* If the FLAT_CCS_BASE_ADDR register is not populated, flag an error */
		if (tile_stolen == lmem_size)
			drm_err(&i915->drm,
				"CCS_BASE_ADDR register did not have expected value\n");

		lmem_size -= tile_stolen;
	} else {
		/* Stolen starts from GSMBASE without CCS */
		lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
	}

	i915_resize_lmem_bar(i915, lmem_size);

	if (i915->params.lmem_size > 0) {
		lmem_size = min_t(resource_size_t, lmem_size,
				  mul_u32_u32(i915->params.lmem_size, SZ_1M));
	}

#ifdef __linux__
	io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
	io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size);
#else
	{
		pcireg_t type;
		bus_size_t len;

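		/*
		 * OpenBSD: read the LMEM BAR base and size from PCI config
		 * space via pci_mapreg_info() instead of the Linux
		 * pci_resource_*() helpers.
		 */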
		type = pci_mapreg_type(i915->pc, i915->tag,
		    0x10 + (4 * GEN12_LMEM_BAR));
		err = -pci_mapreg_info(i915->pc, i915->tag,
		    0x10 + (4 * GEN12_LMEM_BAR), type, &io_start, &len, NULL);
		io_size = min(len, lmem_size);
	}
#endif
	if (!io_size)
		return ERR_PTR(-EIO);

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;
	mem = intel_memory_region_create(i915,
					 0,
					 lmem_size,
					 min_page_size,
					 io_start,
					 io_size,
					 INTEL_MEMORY_LOCAL,
					 0,
					 &intel_region_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	err = reserve_lowmem_region(uncore, mem);
	if (err)
		goto err_region_put;

	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
		&mem->io_start);
	drm_info(&i915->drm, "Local memory IO size: %pa\n",
		 &mem->io_size);
	drm_info(&i915->drm, "Local memory available: %pa\n",
		 &lmem_size);

	if (io_size < lmem_size)
		drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
			 (u64)io_size >> 20);

	return mem;

err_region_put:
	intel_memory_region_destroy(mem);
	return ERR_PTR(err);
}

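/* Create the LMEM region for a GT. */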
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}