xref: /openbsd/sys/dev/pci/drm/i915/gem/i915_gem_lmem.c (revision d5e0de02)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

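/**
 * i915_gem_object_lmem_io_map - Map a range of a contiguous lmem object
 * into CPU address space as write-combined I/O memory.
 * @obj: The object to map. Must be contiguous.
 * @n: The first page of the object to map.
 * @size: The size in bytes of the mapping.
 *
 * Stubbed in this OpenBSD port; the Linux implementation, which maps the
 * range through the memory region's iomap, is preserved under "notyet"
 * below.
 *
 * Return: The write-combined mapping, or NULL while stubbed.
 */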
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	STUB();
	return NULL;
#ifdef notyet
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	/* Offset of page @n relative to the start of the region's iomap. */
	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
#endif
}

/**
 * i915_gem_object_is_lmem - Whether the object is resident in
 * lmem
 * @obj: The object to check.
 *
 * Even if an object is allowed to migrate and change memory region,
 * this function checks whether it will always be present in lmem when
 * valid *or*, if that's not the case, whether it's currently resident in lmem.
 * For migratable and evictable objects, the latter only makes sense when
 * the object is locked.
 *
 * Return: Whether the object is migratable but resident in lmem, or is not
 * migratable and will be present in lmem when valid.
 */
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	if (i915_gem_object_migratable(obj) &&
	    i915_gem_object_evictable(obj))
		assert_object_held(obj);
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

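/*
 * A minimal usage sketch (illustrative, not part of this file): for a
 * migratable and evictable object the result is only stable under the
 * object lock, so take the lock around the check.
 *
 *	err = i915_gem_object_lock(obj, NULL);
 *	if (err)
 *		return err;
 *	lmem = i915_gem_object_is_lmem(obj);
 *	i915_gem_object_unlock(obj);
 */
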
/**
 * __i915_gem_object_is_lmem - Whether the object is resident in
 * lmem while in the fence signaling critical path.
 * @obj: The object to check.
 *
 * This function is intended to be called from within the fence signaling
 * path, where the fence or a pin keeps the object from being migrated, for
 * example during GPU reset.
 *
 * Return: Whether the object is resident in lmem.
 */
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
		    i915_gem_object_evictable(obj));
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

/**
 * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
 * minimum page size for the backing pages.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object. Note that we need to round the size
 * up depending on the @page_size. The final object size can be fished out from
 * the drm GEM object.
 * @page_size: The requested minimum page size in bytes for this object. This
 * is useful if we need something bigger than the region's min_page_size due to
 * some hw restriction, or in some very specialised cases where it needs to be
 * smaller, because the internal fragmentation cost of rounding up the object
 * size would otherwise be too great.
 * @flags: The optional BO allocation flags.
 *
 * Note that this interface assumes you know what you are doing when forcing
 * the @page_size. If this is smaller than the region's min_page_size then it
 * can never be inserted into any GTT, otherwise it might lead to undefined
 * behaviour.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of
 * failure.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
				      resource_size_t size,
				      resource_size_t page_size,
				      unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
					     size, page_size, flags);
}

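/*
 * A minimal usage sketch (illustrative; SZ_2M is an assumption about the
 * surrounding headers): force 2M backing pages for a 2M-sized object.
 *
 *	obj = __i915_gem_object_create_lmem_with_ps(i915, SZ_2M, SZ_2M, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */

/**
 * i915_gem_object_create_lmem_from_data - Create an lmem object and fill
 * it with caller-supplied data.
 * @i915: The i915 instance.
 * @data: The data to copy into the object.
 * @size: The size in bytes of @data.
 *
 * The object is allocated contiguous, with @size rounded up to the page
 * size, and @data is copied in through a write-combined CPU mapping.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of
 * failure.
 */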
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
				      const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	void *map;

	obj = i915_gem_object_create_lmem(i915,
					  round_up(size, PAGE_SIZE),
					  I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return obj;

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		i915_gem_object_put(obj);
		return map;
	}

	memcpy(map, data, size);

	/* Flush the write-combined copy before dropping the mapping. */
	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	return obj;
}

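/*
 * A minimal usage sketch (illustrative; the struct firmware pointer "fw"
 * is an assumed caller-side variable): upload a firmware blob into lmem.
 *
 *	obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */

/**
 * i915_gem_object_create_lmem - Create an object backed by lmem.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object.
 * @flags: The optional BO allocation flags.
 *
 * Unlike __i915_gem_object_create_lmem_with_ps(), no minimum page size is
 * forced for the backing pages.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of
 * failure.
 */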
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
			    resource_size_t size,
			    unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
					     size, 0, flags);
}
148