xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_create.c (revision 14128d64)
1dcaccaf0SMatthew Auld // SPDX-License-Identifier: MIT
2dcaccaf0SMatthew Auld /*
3dcaccaf0SMatthew Auld  * Copyright © 2020 Intel Corporation
4dcaccaf0SMatthew Auld  */
5dcaccaf0SMatthew Auld 
601b94a93SJani Nikula #include <drm/drm_fourcc.h>
701b94a93SJani Nikula 
8acc855d3SJani Nikula #include "display/intel_display.h"
9dcaccaf0SMatthew Auld #include "gem/i915_gem_ioctls.h"
102459e56fSMatthew Auld #include "gem/i915_gem_lmem.h"
11dcaccaf0SMatthew Auld #include "gem/i915_gem_region.h"
12d3ac8d42SDaniele Ceraolo Spurio #include "pxp/intel_pxp.h"
13dcaccaf0SMatthew Auld 
14dcaccaf0SMatthew Auld #include "i915_drv.h"
15be137d79SJani Nikula #include "i915_gem_create.h"
16357814f8SMatthew Auld #include "i915_trace.h"
17ebcb4029SMatthew Auld #include "i915_user_extensions.h"
18dcaccaf0SMatthew Auld 
object_max_page_size(struct intel_memory_region ** placements,unsigned int n_placements)19bf947c98SJason Ekstrand static u32 object_max_page_size(struct intel_memory_region **placements,
20bf947c98SJason Ekstrand 				unsigned int n_placements)
212459e56fSMatthew Auld {
222459e56fSMatthew Auld 	u32 max_page_size = 0;
232459e56fSMatthew Auld 	int i;
242459e56fSMatthew Auld 
25bf947c98SJason Ekstrand 	for (i = 0; i < n_placements; i++) {
26bf947c98SJason Ekstrand 		struct intel_memory_region *mr = placements[i];
272459e56fSMatthew Auld 
282459e56fSMatthew Auld 		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
292459e56fSMatthew Auld 		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
302459e56fSMatthew Auld 	}
312459e56fSMatthew Auld 
322459e56fSMatthew Auld 	GEM_BUG_ON(!max_page_size);
332459e56fSMatthew Auld 	return max_page_size;
342459e56fSMatthew Auld }
352459e56fSMatthew Auld 
object_set_placements(struct drm_i915_gem_object * obj,struct intel_memory_region ** placements,unsigned int n_placements)3634c7ef0aSJason Ekstrand static int object_set_placements(struct drm_i915_gem_object *obj,
372459e56fSMatthew Auld 				 struct intel_memory_region **placements,
382459e56fSMatthew Auld 				 unsigned int n_placements)
392459e56fSMatthew Auld {
4034c7ef0aSJason Ekstrand 	struct intel_memory_region **arr;
4134c7ef0aSJason Ekstrand 	unsigned int i;
4234c7ef0aSJason Ekstrand 
432459e56fSMatthew Auld 	GEM_BUG_ON(!n_placements);
442459e56fSMatthew Auld 
452459e56fSMatthew Auld 	/*
462459e56fSMatthew Auld 	 * For the common case of one memory region, skip storing an
472459e56fSMatthew Auld 	 * allocated array and just point at the region directly.
482459e56fSMatthew Auld 	 */
492459e56fSMatthew Auld 	if (n_placements == 1) {
502459e56fSMatthew Auld 		struct intel_memory_region *mr = placements[0];
512459e56fSMatthew Auld 		struct drm_i915_private *i915 = mr->i915;
522459e56fSMatthew Auld 
532459e56fSMatthew Auld 		obj->mm.placements = &i915->mm.regions[mr->id];
542459e56fSMatthew Auld 		obj->mm.n_placements = 1;
552459e56fSMatthew Auld 	} else {
5634c7ef0aSJason Ekstrand 		arr = kmalloc_array(n_placements,
5734c7ef0aSJason Ekstrand 				    sizeof(struct intel_memory_region *),
5834c7ef0aSJason Ekstrand 				    GFP_KERNEL);
5934c7ef0aSJason Ekstrand 		if (!arr)
6034c7ef0aSJason Ekstrand 			return -ENOMEM;
6134c7ef0aSJason Ekstrand 
6234c7ef0aSJason Ekstrand 		for (i = 0; i < n_placements; i++)
6334c7ef0aSJason Ekstrand 			arr[i] = placements[i];
6434c7ef0aSJason Ekstrand 
6534c7ef0aSJason Ekstrand 		obj->mm.placements = arr;
662459e56fSMatthew Auld 		obj->mm.n_placements = n_placements;
672459e56fSMatthew Auld 	}
6834c7ef0aSJason Ekstrand 
6934c7ef0aSJason Ekstrand 	return 0;
702459e56fSMatthew Auld }
712459e56fSMatthew Auld 
/*
 * Create a userspace handle for @obj and report the final (page-aligned)
 * size back through @size_p.  The allocation reference is always dropped
 * here; on success the handle keeps the object alive instead.
 */
static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 final_size = obj->base.size;
	int err;

	err = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (err)
		return err;

	*size_p = final_size;
	return 0;
}
89357814f8SMatthew Auld 
/*
 * Common creation backend shared by the create ioctls and the internal
 * user-object path.  @placements is in priority order and the object is
 * initially backed by placements[0]; @ext_flags carries extra I915_BO_*
 * flags gathered from create_ext extensions.
 *
 * Returns the new object (holding one reference) or an ERR_PTR().
 */
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	i915_gem_flush_free_objects(i915);

	/* Round up so the object fits the strictest candidate placement */
	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	/* Back the object with the highest-priority (first) placement */
	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	/* A multi-placement array was allocated by object_set_placements() */
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
145dcaccaf0SMatthew Auld 
/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 *                                 DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking.  It is assumed that the set of placement regions has
 * already been verified to be valid.
 *
 * Return: pointer to the new object on success, ERR_PTR() otherwise.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements)
{
	/* No extension flags: plain DRM_I915_GEM_CREATE semantics */
	return __i915_gem_object_create_user_ext(i915, size, placements,
						 n_placements, 0);
}
166d3ac8d42SDaniele Ceraolo Spurio 
/*
 * Dumb-buffer creation entry point (DRM_IOCTL_MODE_CREATE_DUMB): work out
 * a display-compatible pitch and size for the requested dimensions, then
 * allocate and publish the backing object.
 */
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	/* Bytes per pixel, rounding partial bytes up */
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	/* Map the requested bpp onto a fourcc format for the stride check */
	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	/* A pitch smaller than the width means width * cpp wrapped around */
	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	/* Dumb buffers land in local memory when the device has it */
	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);

	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
217dcaccaf0SMatthew Auld 
218dcaccaf0SMatthew Auld /**
219b29b32a2SLee Jones  * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
220dcaccaf0SMatthew Auld  * @dev: drm device pointer
221dcaccaf0SMatthew Auld  * @data: ioctl data blob
222dcaccaf0SMatthew Auld  * @file: drm file pointer
223dcaccaf0SMatthew Auld  */
224dcaccaf0SMatthew Auld int
i915_gem_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file)225dcaccaf0SMatthew Auld i915_gem_create_ioctl(struct drm_device *dev, void *data,
226dcaccaf0SMatthew Auld 		      struct drm_file *file)
227dcaccaf0SMatthew Auld {
228dcaccaf0SMatthew Auld 	struct drm_i915_private *i915 = to_i915(dev);
229dcaccaf0SMatthew Auld 	struct drm_i915_gem_create *args = data;
230357814f8SMatthew Auld 	struct drm_i915_gem_object *obj;
2312459e56fSMatthew Auld 	struct intel_memory_region *mr;
232357814f8SMatthew Auld 
2332459e56fSMatthew Auld 	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
2342459e56fSMatthew Auld 
235bf947c98SJason Ekstrand 	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
236bf947c98SJason Ekstrand 	if (IS_ERR(obj))
237bf947c98SJason Ekstrand 		return PTR_ERR(obj);
238357814f8SMatthew Auld 
239357814f8SMatthew Auld 	return i915_gem_publish(obj, file, &args->size, &args->handle);
240dcaccaf0SMatthew Auld }
241ebcb4029SMatthew Auld 
/* Accumulated state from DRM_I915_GEM_CREATE_EXT extension parsing */
struct create_ext {
	struct drm_i915_private *i915;
	/* Requested placements, in priority order */
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;
	/* Bitmask of region ids present in @placements */
	unsigned int placement_mask;
	/* I915_BO_* flags accumulated from extensions (e.g. PXP) */
	unsigned long flags;
	/* Requested PAT index, or PAT_INDEX_NOT_SET if left to the kernel */
	unsigned int pat_index;
};
250ebcb4029SMatthew Auld 
/*
 * Write a human-readable summary of @placements into @buf for debug
 * output, stopping once the buffer would be exceeded (output truncated).
 */
static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int idx;

	buf[0] = '\0';

	for (idx = 0; idx < n_placements; idx++) {
		struct intel_memory_region *region = placements[idx];
		int written;

		written = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
				   region->name, region->type, region->instance);
		if (written >= size)
			return;

		buf += written;
		size -= written;
	}
}
2722459e56fSMatthew Auld 
/*
 * Parse an I915_GEM_CREATE_EXT_MEMORY_REGIONS extension: copy the
 * user-supplied (class, instance) pairs, resolve each to a memory
 * region, and stash the validated list in @ext_data.
 *
 * Returns 0 on success, -EINVAL for malformed input, unknown/duplicate
 * regions, or if placements were already set by a previous extension,
 * and -EFAULT if the user pointer can't be read.
 */
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	/* Collect all basic-validation failures before bailing out */
	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	/* Resolve each user entry, rejecting unknown and duplicate regions */
	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	/* Only one MEMORY_REGIONS extension may be supplied per create */
	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	/* Commit the fully-validated list into the shared extension state */
	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	ext_data->placement_mask = mask;
	return 0;

out_dump:
	/* Dump both the pre-existing and partially-validated lists for debug */
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}
3672459e56fSMatthew Auld 
/* I915_GEM_CREATE_EXT_MEMORY_REGIONS handler: copy in, then validate */
static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions ext;

	return copy_from_user(&ext, base, sizeof(ext)) ?
	       -EFAULT : set_placements(&ext, data);
}
3782459e56fSMatthew Auld 
/*
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT handler: request a PXP-protected
 * object by setting I915_BO_PROTECTED in the accumulated flags.
 */
static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_gem_create_ext_protected_content ext;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	/* No flags are defined for this extension yet */
	if (ext.flags)
		return -EINVAL;

	/* Protected objects only make sense with PXP available */
	if (!intel_pxp_is_enabled(ext_data->i915->pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;
	return 0;
}
397d3ac8d42SDaniele Ceraolo Spurio 
/*
 * I915_GEM_CREATE_EXT_SET_PAT handler: record a user-selected PAT index
 * (bounds-checked against the platform's max) in @ext_data.
 *
 * NOTE(review): ext.rsvd is not checked for zero here — confirm that is
 * intentional for this extension's ABI.
 */
static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_create_ext_set_pat ext;
	unsigned int max_pat_index;

	/* Guard against the uapi struct growing without updating this code */
	BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
		     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

	/* Limiting the extension only to Xe_LPG and beyond */
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
		return -ENODEV;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	max_pat_index = INTEL_INFO(i915)->max_pat_index;

	if (ext.pat_index > max_pat_index) {
		drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
			ext.pat_index);
		return -EINVAL;
	}

	ext_data->pat_index = ext.pat_index;

	return 0;
}
42781b1b599SFei Yang 
/* Dispatch table for DRM_I915_GEM_CREATE_EXT user extensions */
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
	[I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};
433ebcb4029SMatthew Auld 
43481b1b599SFei Yang #define PAT_INDEX_NOT_SET	0xffff
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Return: 0 on success, negative error code otherwise.
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	/* NEEDS_CPU_ACCESS is the only flag defined for this ioctl */
	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
		return -EINVAL;

	ext_data.pat_index = PAT_INDEX_NOT_SET;
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	/* Default to a single system-memory placement if none were given */
	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
		/* The CPU-access hint requires multiple placements ... */
		if (ext_data.n_placements == 1)
			return -EINVAL;

		/*
		 * We always need to be able to spill to system memory, if we
		 * can't place in the mappable part of LMEM.
		 */
		if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
			return -EINVAL;
	} else {
		/* Anything that may live in LMEM can be kept GPU-only */
		if (ext_data.n_placements > 1 ||
		    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
			ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Apply a user-selected PAT index, if the SET_PAT extension ran */
	if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
		i915_gem_object_set_pat_index(obj, ext_data.pat_index);
		/* Mark pat_index is set by UMD */
		obj->pat_set_by_user = true;
	}

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
499