xref: /linux/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c (revision 52338415)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>

#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	uint32_t version, major, minor, dmap_cfg, vg_cfg;
	unsigned long clk;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

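	/* the MDP clocks must be running to read the version register: */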
	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	major = FIELD(version, MDP4_VERSION_MAJOR);
	minor = FIELD(version, MDP4_VERSION_MINOR);

	DBG("found MDP4 version v%d.%d", major, minor);

	if (major != 4) {
		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

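	/* pick DMA and pipe fetch burst sizes based on core revision and clock rate: */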
	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	dev->mode_config.allow_fb_modifiers = true;

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}

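/* enable/disable the MDP clocks for the duration of an atomic commit: */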
static void mdp4_enable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
}

static void mdp4_disable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_disable(mdp4_kms);
}

static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	/* hold a vblank reference for each CRTC in the commit, see 119ecb7fd: */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_get(crtc);
}

static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		mdp4_crtc_wait_for_commit_done(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	/* drop the vblank references taken in mdp4_prepare_commit(), see 119ecb7fd: */
	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		drm_crtc_vblank_put(crtc);
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_TMDS:
		return mdp4_dtv_round_pixclk(encoder, rate);
	case DRM_MODE_ENCODER_LVDS:
	case DRM_MODE_ENCODER_DSI:
	default:
		return rate;
	}
}

static const char * const iommu_ports[] = {
	"mdp_port0_cb0", "mdp_port1_cb0",
};

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,
		.disable_vblank  = mdp4_disable_vblank,
		.enable_commit   = mdp4_enable_commit,
		.disable_commit  = mdp4_disable_commit,
		.prepare_commit  = mdp4_prepare_commit,
		.flush_commit    = mdp4_flush_commit,
		.wait_flush      = mdp4_wait_flush,
		.complete_commit = mdp4_complete_commit,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp4_round_pixclk,
		.destroy         = mdp4_destroy,
	},
	.set_irqmask         = mdp4_set_irqmask,
};

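/* clock gating helpers; clocks that were not found at probe time are left NULL and skipped: */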
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_disable_unprepare(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_disable_unprepare(mdp4_kms->pclk);
	if (mdp4_kms->lut_clk)
		clk_disable_unprepare(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_prepare_enable(mdp4_kms->pclk);
	if (mdp4_kms->lut_clk)
		clk_prepare_enable(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}


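/* construct the encoder (and, for LVDS, the connector) for a single interface type: */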
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
				  int intf_type)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct device_node *panel_node;
	int dsi_id;
	int ret;

	switch (intf_type) {
	case DRM_MODE_ENCODER_LVDS:
		/*
		 * bail out early if there is no panel node (no need to
		 * initialize LCDC encoder and LVDS connector)
		 */
		panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
		if (!panel_node)
			return 0;

		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
			return PTR_ERR(encoder);
		}

		/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
		encoder->possible_crtcs = 1 << DMA_P;

		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
			return PTR_ERR(connector);
		}

		priv->encoders[priv->num_encoders++] = encoder;
		priv->connectors[priv->num_connectors++] = connector;

		break;
	case DRM_MODE_ENCODER_TMDS:
		encoder = mdp4_dtv_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
			return PTR_ERR(encoder);
		}

		/* DTV can be hooked to DMA_E: */
		encoder->possible_crtcs = 1 << 1;

		if (priv->hdmi) {
			/* Construct bridge/connector for HDMI: */
			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
			if (ret) {
				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
				return ret;
			}
		}

		priv->encoders[priv->num_encoders++] = encoder;

		break;
	case DRM_MODE_ENCODER_DSI:
		/* only DSI1 supported for now */
		dsi_id = 0;

		if (!priv->dsi[dsi_id])
			break;

		encoder = mdp4_dsi_encoder_init(dev);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			DRM_DEV_ERROR(dev->dev,
				"failed to construct DSI encoder: %d\n", ret);
			return ret;
		}

		/* TODO: Add DMA_S later? */
		encoder->possible_crtcs = 1 << DMA_P;
		priv->encoders[priv->num_encoders++] = encoder;

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
				ret);
			return ret;
		}

		break;
	default:
		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
		return -EINVAL;
	}

	return 0;
}

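/* construct the fixed set of planes, CRTCs and interfaces for this display controller: */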
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;
	static const enum mdp4_pipe rgb_planes[] = {
		RGB1, RGB2,
	};
	static const enum mdp4_pipe vg_planes[] = {
		VG1, VG2,
	};
	static const enum mdp4_dma mdp4_crtcs[] = {
		DMA_P, DMA_E,
	};
	static const char * const mdp4_crtc_names[] = {
		"DMA_P", "DMA_E",
	};
	static const int mdp4_intfs[] = {
		DRM_MODE_ENCODER_LVDS,
		DRM_MODE_ENCODER_DSI,
		DRM_MODE_ENCODER_TMDS,
	};

	/* construct non-private planes: */
	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
		plane = mdp4_plane_init(dev, vg_planes[i], false);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for VG%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

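	/* construct the private RGB planes and the CRTCs they feed: */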
	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
		plane = mdp4_plane_init(dev, rgb_planes[i], true);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for RGB%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}

		crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
				mdp4_crtcs[i]);
		if (IS_ERR(crtc)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
				mdp4_crtc_names[i]);
			ret = PTR_ERR(crtc);
			goto fail;
		}

		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * we currently set up two relatively fixed paths:
	 *
	 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
	 *			or
	 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
	 *
	 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
	 */

	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
				i, ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct msm_gem_address_space *aspace;
	int irq, ret;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	mdp_kms_init(&mdp4_kms->base, &kms_funcs);

	kms = &mdp4_kms->base.base;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	if (mdp4_kms->rev >= 2) {
		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp4_kms->lut_clk)) {
			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
			ret = PTR_ERR(mdp4_kms->lut_clk);
			goto fail;
		}
	}

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);
	if (mdp4_kms->lut_clk)
		clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
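	/* wait roughly one frame (16 ms) for the disables to take effect: */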
	mdelay(16);

	if (config->iommu) {
		aspace = msm_gem_address_space_create(&pdev->dev,
				config->iommu, "mdp4");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;

		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			goto fail;
	} else {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

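	/* allocate and pin a scanout buffer to show while no cursor image is set: */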
	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);
	if (config.iommu) {
		config.iommu->geometry.aperture_start = 0x1000;
		config.iommu->geometry.aperture_end = 0xffffffff;
	}

	return &config;
}
585