// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_fourcc.h>

#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

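/*
 * Return the largest minimum page size across the requested placements;
 * the object size must be rounded up to this so the object can be backed
 * by any of the candidate regions.
 */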
static u32 object_max_page_size(struct intel_memory_region **placements,
				unsigned int n_placements)
{
	u32 max_page_size = 0;
	int i;

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];

		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
	}

	GEM_BUG_ON(!max_page_size);
	return max_page_size;
}

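/*
 * Record the list of candidate memory regions on the object.  A single
 * placement points directly at the per-device region table; multiple
 * placements are copied into a freshly allocated array.
 */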
static int object_set_placements(struct drm_i915_gem_object *obj,
				 struct intel_memory_region **placements,
				 unsigned int n_placements)
{
	struct intel_memory_region **arr;
	unsigned int i;

	GEM_BUG_ON(!n_placements);

	/*
	 * For the common case of one memory region, skip storing an
	 * allocated array and just point at the region directly.
	 */
	if (n_placements == 1) {
		struct intel_memory_region *mr = placements[0];
		struct drm_i915_private *i915 = mr->i915;

		obj->mm.placements = &i915->mm.regions[mr->id];
		obj->mm.n_placements = 1;
	} else {
		arr = kmalloc_array(n_placements,
				    sizeof(struct intel_memory_region *),
				    GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		for (i = 0; i < n_placements; i++)
			arr[i] = placements[i];

		obj->mm.placements = arr;
		obj->mm.n_placements = n_placements;
	}

	return 0;
}

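/*
 * Expose a freshly created object to userspace: create a GEM handle for
 * it and report the final (rounded up) size back to the caller.  The
 * allocation reference is dropped here; the handle now owns the object.
 */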
static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 size = obj->base.size;
	int ret;

	ret = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*size_p = size;
	return 0;
}

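/*
 * Common backend for userspace object creation: round the size up to the
 * largest placement page size, validate it, create the backing object
 * through the first (highest priority) region's ops and apply any flags
 * requested via create_ext extensions.
 */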
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	i915_gem_flush_free_objects(i915);

	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 *                                 DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking.  It is assumed that the set of placement regions has
 * already been verified to be valid.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements)
{
	return __i915_gem_object_create_user_ext(i915, size, placements,
						 n_placements, 0);
}
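
/*
 * Illustrative sketch (not taken from this file): a selftest wanting a
 * 64K object backed by system memory could do roughly
 *
 *	struct intel_memory_region *smem =
 *		intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
 *	struct drm_i915_gem_object *obj =
 *		__i915_gem_object_create_user(i915, SZ_64K, &smem, 1);
 *
 * (SZ_64K is assumed from <linux/sizes.h>) and must check IS_ERR(obj)
 * before use, since errors are returned as ERR_PTR values.
 */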

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);

	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}

/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;

	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}

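/*
 * Per-ioctl state accumulated while walking the create_ext extension
 * chain, consumed once the object is actually created.
 */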
struct create_ext {
	struct drm_i915_private *i915;
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;
	unsigned int placement_mask;
	unsigned long flags;
	unsigned int pat_index;
};

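/*
 * Format a human readable list of placements into @buf for the debug
 * messages below; output is silently truncated if it does not fit.
 */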
static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int i;

	buf[0] = '\0';

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];
		int r;

		r = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
			     mr->name, mr->type, mr->instance);
		if (r >= size)
			return;

		buf += r;
		size -= r;
	}
}

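/*
 * Validate the I915_GEM_CREATE_EXT_MEMORY_REGIONS payload: every region
 * must exist on this device, must not be private and must not be listed
 * twice, and the extension itself may only be supplied once.
 */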
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	ext_data->placement_mask = mask;
	return 0;

out_dump:
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}

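/* Thin wrapper that copies the extension from userspace before validation. */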
static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions ext;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	return set_placements(&ext, data);
}

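/*
 * Handle I915_GEM_CREATE_EXT_PROTECTED_CONTENT: only allowed when PXP is
 * enabled, in which case the object is flagged as protected.
 */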
static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct drm_i915_gem_create_ext_protected_content ext;
	struct create_ext *ext_data = data;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	if (ext.flags)
		return -EINVAL;

	if (!intel_pxp_is_enabled(ext_data->i915->pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;

	return 0;
}

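/*
 * Handle I915_GEM_CREATE_EXT_SET_PAT: let userspace pick a PAT index for
 * the object on supported platforms, after range checking it against the
 * platform's PAT table.
 */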
static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_create_ext_set_pat ext;
	unsigned int max_pat_index;

	BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
		     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

	/* Limiting the extension only to Xe_LPG and beyond */
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
		return -ENODEV;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	max_pat_index = INTEL_INFO(i915)->max_pat_index;

	if (ext.pat_index > max_pat_index) {
		drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
			ext.pat_index);
		return -EINVAL;
	}

	ext_data->pat_index = ext.pat_index;

	return 0;
}

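/*
 * Dispatch table mapping create_ext extension ids from the uAPI to their
 * handlers; walked by i915_user_extensions() below.
 */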
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
	[I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};

#define PAT_INDEX_NOT_SET	0xffff
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
		return -EINVAL;

	ext_data.pat_index = PAT_INDEX_NOT_SET;
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
		if (ext_data.n_placements == 1)
			return -EINVAL;

		/*
		 * We always need to be able to spill to system memory, if we
		 * can't place in the mappable part of LMEM.
		 */
		if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
			return -EINVAL;
	} else {
		if (ext_data.n_placements > 1 ||
		    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
			ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
		i915_gem_object_set_pat_index(obj, ext_data.pat_index);
		/* Mark that pat_index was set by userspace (the UMD) */
		obj->pat_set_by_user = true;
	}

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}