/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was physically installed. We refer to this memory as
 * stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
47 
48 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
49 					 struct drm_mm_node *node, u64 size,
50 					 unsigned alignment, u64 start, u64 end)
51 {
52 	int ret;
53 
54 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
55 		return -ENODEV;
56 
57 	mutex_lock(&dev_priv->mm.stolen_lock);
58 	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
59 					  size, alignment, 0,
60 					  start, end, DRM_MM_INSERT_BEST);
61 	mutex_unlock(&dev_priv->mm.stolen_lock);
62 
63 	return ret;
64 }
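
/*
 * Usage sketch (hypothetical caller, not taken from this file): reserve a
 * 64 KiB chunk from the low 1 MiB of stolen memory, e.g. for a buffer that
 * must sit below a hardware-imposed boundary, and release it afterwards:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node_in_range(dev_priv, &node,
 *						   64 * 1024, 4096,
 *						   0, 1024 * 1024);
 *	if (err)
 *		return err;	// -ENODEV (no stolen) or -ENOSPC (full)
 *	...
 *	i915_gem_stolen_remove_node(dev_priv, &node);
 */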

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

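/*
 * Minimal DragonFly stand-in for the Linux devm_request_mem_region()
 * helper: it initialises a single static rman spanning the requested
 * range and returns a freshly allocated (but otherwise unused) resource
 * pointer that the caller treats purely as a success/failure token.
 * Unlike the Linux original, the result is not tied to the device's
 * lifetime and is never released.
 */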
#ifdef __DragonFly__
static
struct resource * devm_request_mem_region(struct device *dev,
    resource_size_t start, resource_size_t n, const char *name)
{
	static struct rman stolen_rman;
	struct resource *res;

	stolen_rman.rm_start = start;
	stolen_rman.rm_end = start + n;
	stolen_rman.rm_type = RMAN_ARRAY;
	stolen_rman.rm_descr = name;
	if (rman_init(&stolen_rman, -1))
		return NULL;

	if (rman_manage_region(&stolen_rman, stolen_rman.rm_start, stolen_rman.rm_end))
		return NULL;

	res = kmalloc(sizeof(*res), M_DRM, GFP_KERNEL);
	return res;
}
#endif	/* __DragonFly__ */

static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	dma_addr_t base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
	base = 0;
	if (INTEL_GEN(dev_priv) >= 3) {
		u32 bsm;

		pci_read_config_dword(pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev_priv)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I845G(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0 || add_overflows(base, ggtt->stolen_size))
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct {
			dma_addr_t start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			dma_addr_t end = base + ggtt->stolen_size - 1;

			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
				      &base, &end);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
					    ggtt->stolen_size - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			dma_addr_t end = base + ggtt->stolen_size;

			DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
				  &base, &end);
			base = 0;
		}
	}

	return base;
}
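
/*
 * Worked example (illustrative numbers, not taken from real hardware):
 * on an i830-class machine with DRB3 = 0x40, TSEG enabled at 1 MiB and
 * an 8 MiB stolen size, the fallback path above computes
 *
 *	tom  = 0x40 * MB(32)       = 2048 MiB
 *	base = tom - MB(1) - MB(8) = 2039 MiB = 0x7f700000
 *
 * whereas on gen3+ the base comes straight from BSM: a read of
 * 0x7f700001 masked with INTEL_BSM_MASK (1 MiB granularity) likewise
 * yields 0x7f700000.
 */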

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	dma_addr_t stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t reserved_base, stolen_top;
	u32 reserved_total, reserved_size;
	u32 stolen_usable_start;

	lockinit(&dev_priv->mm.stolen_lock, "i915msl", 0, LK_CANRECURSE);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
	reserved_base = 0;
	reserved_size = 0;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev_priv))
			g4x_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		dma_addr_t reserved_top = reserved_base + reserved_size;
		DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
			      &reserved_base, &reserved_top,
			      &dev_priv->mm.stolen_base, &stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

	ggtt->stolen_usable_size =
		ggtt->stolen_size - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    ggtt->stolen_usable_size);

	return 0;
}
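
/*
 * Sizing sketch (made-up but representative numbers): with a 64 MiB
 * stolen region whose top 8 MiB is reserved by firmware, a gen9 part
 * (WaSkipStolenMemoryFirstPage applies) ends up with
 *
 *	reserved_total      = stolen_top - reserved_base = 8 MiB
 *	stolen_usable_start = 4096
 *	stolen_usable_size  = 64 MiB - 8 MiB - 4096      = 56 MiB - 4 KiB
 *
 * so the drm_mm allocator manages stolen offsets [4096, 56 MiB).
 */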

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}
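
/*
 * Resulting layout (illustrative numbers): for offset 0x10000 and size
 * 0x2000 over a stolen base of 0x7f700000, the table holds the single
 * entry sg_dma_address(st->sgl) == 0x7f710000, sg_dma_len(st->sgl) ==
 * 0x2000. There is no struct page behind it, so consumers must use the
 * DMA address and never the page_to_*() helpers.
 */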

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
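
/*
 * Usage sketch (hypothetical caller, mirroring how ringbuffer-style code
 * prefers the carve-out): try stolen memory first and fall back to a
 * regular shmemfs-backed object when stolen is absent or exhausted:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(dev_priv, 32 * 4096);
 *	if (!obj)
 *		obj = i915_gem_object_create(dev_priv, 32 * 4096);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */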

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);

	lockmgr(&dev_priv->mm.obj_lock, LK_EXCLUSIVE);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	lockmgr(&dev_priv->mm.obj_lock, LK_RELEASE);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}
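
/*
 * Usage sketch (hypothetical; the KMS initial-plane takeover does
 * something similar): wrap the framebuffer the BIOS left in stolen
 * memory so fbcon can inherit it. `base` and `size` are assumed to have
 * been decoded from the plane registers and to be suitably aligned:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
 *							     base, base,
 *							     size);
 *	if (obj == NULL)
 *		; // fall back to allocating a fresh framebuffer
 */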