15ca02815Sjsg // SPDX-License-Identifier: MIT
25ca02815Sjsg /*
35ca02815Sjsg  * Copyright © 2021 Intel Corporation
45ca02815Sjsg  */
55ca02815Sjsg 
65ca02815Sjsg #include <linux/slab.h>
75ca02815Sjsg 
85ca02815Sjsg #include <drm/ttm/ttm_placement.h>
9*f005ef32Sjsg #include <drm/ttm/ttm_bo.h>
105ca02815Sjsg 
111bb76ff1Sjsg #include <drm/drm_buddy.h>
121bb76ff1Sjsg 
135ca02815Sjsg #include "i915_ttm_buddy_manager.h"
145ca02815Sjsg 
155ca02815Sjsg #include "i915_gem.h"
165ca02815Sjsg 
/*
 * Buddy-allocator backed TTM resource manager state. All mutable state
 * (the buddy mm, the reserved list and the visible_* counters) is
 * protected by @lock.
 */
struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager; /* embedded base TTM manager */
	struct drm_buddy mm;		/* underlying buddy allocator */
	struct list_head reserved;	/* blocks set aside via i915_ttm_buddy_man_reserve() */
	struct rwlock lock;		/* guards mm, reserved and the visible_* counters */
	unsigned long visible_size;	/* CPU-visible portion of the region, in pages */
	unsigned long visible_avail;	/* pages still free within the visible portion */
	unsigned long visible_reserved;	/* visible pages consumed by @reserved */
	u64 default_page_size;		/* default minimum allocation size, in bytes */
};
275ca02815Sjsg 
285ca02815Sjsg static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager * man)295ca02815Sjsg to_buddy_manager(struct ttm_resource_manager *man)
305ca02815Sjsg {
315ca02815Sjsg 	return container_of(man, struct i915_ttm_buddy_manager, manager);
325ca02815Sjsg }
335ca02815Sjsg 
/*
 * Allocate backing pages for @bo from the buddy allocator, honouring the
 * placement's pfn range, TOPDOWN/CONTIGUOUS flags and the CPU-visible
 * accounting. On success *@res points at the embedded ttm_resource of a
 * freshly allocated i915_ttm_buddy_resource; on failure the resource is
 * torn down and an errno is returned (-ENOMEM, -E2BIG, -ENOSPC, or
 * whatever drm_buddy_alloc_blocks() failed with).
 */
static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct drm_buddy *mm = &bman->mm;
	unsigned long n_pages, lpfn;
	u64 min_page_size;
	u64 size;
	int err;

	/* lpfn == 0 means "no upper bound": clamp to the whole region. */
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	/* Only use the (slower) range allocator if the placement demands it. */
	if (place->fpfn || lpfn != man->size)
		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	GEM_BUG_ON(!bman_res->base.size);
	size = bman_res->base.size;

	/* The BO's page_alignment, if set, overrides the manager default. */
	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);
	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));

	/*
	 * For a CONTIGUOUS request that isn't an exact-range allocation,
	 * over-allocate to the next power of two (buddy blocks are naturally
	 * contiguous at that granularity) and trim back down after the fact.
	 * The search window may need widening to fit the rounded-up size.
	 */
	if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		unsigned long pages;

		size = roundup_pow_of_two(size);
		min_page_size = size;

		pages = size >> ilog2(mm->chunk_size);
		if (pages > lpfn)
			lpfn = pages;
	}

	if (size > lpfn << PAGE_SHIFT) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	/*
	 * Early reject if the request must land entirely in the CPU-visible
	 * portion but not enough visible pages remain.
	 */
	mutex_lock(&bman->lock);
	if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) {
		mutex_unlock(&bman->lock);
		err = -ENOSPC;
		goto err_free_res;
	}

	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
				     (u64)lpfn << PAGE_SHIFT,
				     (u64)n_pages << PAGE_SHIFT,
				     min_page_size,
				     &bman_res->blocks,
				     bman_res->flags);
	if (unlikely(err))
		goto err_free_blocks;

	/* Give back the tail of any power-of-two over-allocation above. */
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		u64 original_size = (u64)bman_res->base.size;

		drm_buddy_block_trim(mm,
				     original_size,
				     &bman_res->blocks);
	}

	/*
	 * Track how many of the allocated pages fall inside the CPU-visible
	 * portion. If the whole search window was visible, everything does;
	 * otherwise walk the blocks and count the visible overlap per block.
	 */
	if (lpfn <= bman->visible_size) {
		bman_res->used_visible_size = PFN_UP(bman_res->base.size);
	} else {
		struct drm_buddy_block *block;

		list_for_each_entry(block, &bman_res->blocks, link) {
			unsigned long start =
				drm_buddy_block_offset(block) >> PAGE_SHIFT;

			if (start < bman->visible_size) {
				unsigned long end = start +
					(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

				bman_res->used_visible_size +=
					min(end, bman->visible_size) - start;
			}
		}
	}

	if (bman_res->used_visible_size)
		bman->visible_avail -= bman_res->used_visible_size;

	mutex_unlock(&bman->lock);

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	/* Blocks must be returned while still holding the manager lock. */
	drm_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	ttm_resource_fini(man, &bman_res->base);
	kfree(bman_res);
	return err;
}
1535ca02815Sjsg 
i915_ttm_buddy_man_free(struct ttm_resource_manager * man,struct ttm_resource * res)1545ca02815Sjsg static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
1555ca02815Sjsg 				    struct ttm_resource *res)
1565ca02815Sjsg {
1575ca02815Sjsg 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
1585ca02815Sjsg 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
1595ca02815Sjsg 
1605ca02815Sjsg 	mutex_lock(&bman->lock);
1611bb76ff1Sjsg 	drm_buddy_free_list(&bman->mm, &bman_res->blocks);
1621bb76ff1Sjsg 	bman->visible_avail += bman_res->used_visible_size;
1635ca02815Sjsg 	mutex_unlock(&bman->lock);
1645ca02815Sjsg 
1651bb76ff1Sjsg 	ttm_resource_fini(man, res);
1665ca02815Sjsg 	kfree(bman_res);
1675ca02815Sjsg }
1685ca02815Sjsg 
i915_ttm_buddy_man_intersects(struct ttm_resource_manager * man,struct ttm_resource * res,const struct ttm_place * place,size_t size)1691bb76ff1Sjsg static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
1701bb76ff1Sjsg 					  struct ttm_resource *res,
1711bb76ff1Sjsg 					  const struct ttm_place *place,
1721bb76ff1Sjsg 					  size_t size)
1731bb76ff1Sjsg {
1741bb76ff1Sjsg 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
1751bb76ff1Sjsg 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
1761bb76ff1Sjsg 	struct drm_buddy *mm = &bman->mm;
1771bb76ff1Sjsg 	struct drm_buddy_block *block;
1781bb76ff1Sjsg 
1791bb76ff1Sjsg 	if (!place->fpfn && !place->lpfn)
1801bb76ff1Sjsg 		return true;
1811bb76ff1Sjsg 
1821bb76ff1Sjsg 	GEM_BUG_ON(!place->lpfn);
1831bb76ff1Sjsg 
1841bb76ff1Sjsg 	/*
1851bb76ff1Sjsg 	 * If we just want something mappable then we can quickly check
1861bb76ff1Sjsg 	 * if the current victim resource is using any of the CPU
1871bb76ff1Sjsg 	 * visible portion.
1881bb76ff1Sjsg 	 */
1891bb76ff1Sjsg 	if (!place->fpfn &&
1901bb76ff1Sjsg 	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
1911bb76ff1Sjsg 		return bman_res->used_visible_size > 0;
1921bb76ff1Sjsg 
1931bb76ff1Sjsg 	/* Check each drm buddy block individually */
1941bb76ff1Sjsg 	list_for_each_entry(block, &bman_res->blocks, link) {
1951bb76ff1Sjsg 		unsigned long fpfn =
1961bb76ff1Sjsg 			drm_buddy_block_offset(block) >> PAGE_SHIFT;
1971bb76ff1Sjsg 		unsigned long lpfn = fpfn +
1981bb76ff1Sjsg 			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
1991bb76ff1Sjsg 
2001bb76ff1Sjsg 		if (place->fpfn < lpfn && place->lpfn > fpfn)
2011bb76ff1Sjsg 			return true;
2021bb76ff1Sjsg 	}
2031bb76ff1Sjsg 
2041bb76ff1Sjsg 	return false;
2051bb76ff1Sjsg }
2061bb76ff1Sjsg 
i915_ttm_buddy_man_compatible(struct ttm_resource_manager * man,struct ttm_resource * res,const struct ttm_place * place,size_t size)2071bb76ff1Sjsg static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
2081bb76ff1Sjsg 					  struct ttm_resource *res,
2091bb76ff1Sjsg 					  const struct ttm_place *place,
2101bb76ff1Sjsg 					  size_t size)
2111bb76ff1Sjsg {
2121bb76ff1Sjsg 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
2131bb76ff1Sjsg 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
2141bb76ff1Sjsg 	struct drm_buddy *mm = &bman->mm;
2151bb76ff1Sjsg 	struct drm_buddy_block *block;
2161bb76ff1Sjsg 
2171bb76ff1Sjsg 	if (!place->fpfn && !place->lpfn)
2181bb76ff1Sjsg 		return true;
2191bb76ff1Sjsg 
2201bb76ff1Sjsg 	GEM_BUG_ON(!place->lpfn);
2211bb76ff1Sjsg 
2221bb76ff1Sjsg 	if (!place->fpfn &&
2231bb76ff1Sjsg 	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
224*f005ef32Sjsg 		return bman_res->used_visible_size == PFN_UP(res->size);
2251bb76ff1Sjsg 
2261bb76ff1Sjsg 	/* Check each drm buddy block individually */
2271bb76ff1Sjsg 	list_for_each_entry(block, &bman_res->blocks, link) {
2281bb76ff1Sjsg 		unsigned long fpfn =
2291bb76ff1Sjsg 			drm_buddy_block_offset(block) >> PAGE_SHIFT;
2301bb76ff1Sjsg 		unsigned long lpfn = fpfn +
2311bb76ff1Sjsg 			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
2321bb76ff1Sjsg 
2331bb76ff1Sjsg 		if (fpfn < place->fpfn || lpfn > place->lpfn)
2341bb76ff1Sjsg 			return false;
2351bb76ff1Sjsg 	}
2361bb76ff1Sjsg 
2371bb76ff1Sjsg 	return true;
2381bb76ff1Sjsg }
2391bb76ff1Sjsg 
i915_ttm_buddy_man_debug(struct ttm_resource_manager * man,struct drm_printer * printer)2401bb76ff1Sjsg static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
2411bb76ff1Sjsg 				     struct drm_printer *printer)
2421bb76ff1Sjsg {
2431bb76ff1Sjsg 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
2441bb76ff1Sjsg 	struct drm_buddy_block *block;
2451bb76ff1Sjsg 
2461bb76ff1Sjsg 	mutex_lock(&bman->lock);
2471bb76ff1Sjsg 	drm_printf(printer, "default_page_size: %lluKiB\n",
2481bb76ff1Sjsg 		   bman->default_page_size >> 10);
2491bb76ff1Sjsg 	drm_printf(printer, "visible_avail: %lluMiB\n",
2501bb76ff1Sjsg 		   (u64)bman->visible_avail << PAGE_SHIFT >> 20);
2511bb76ff1Sjsg 	drm_printf(printer, "visible_size: %lluMiB\n",
2521bb76ff1Sjsg 		   (u64)bman->visible_size << PAGE_SHIFT >> 20);
2531bb76ff1Sjsg 	drm_printf(printer, "visible_reserved: %lluMiB\n",
2541bb76ff1Sjsg 		   (u64)bman->visible_reserved << PAGE_SHIFT >> 20);
2551bb76ff1Sjsg 
2561bb76ff1Sjsg 	drm_buddy_print(&bman->mm, printer);
2571bb76ff1Sjsg 
2581bb76ff1Sjsg 	drm_printf(printer, "reserved:\n");
2591bb76ff1Sjsg 	list_for_each_entry(block, &bman->reserved, link)
2601bb76ff1Sjsg 		drm_buddy_block_print(&bman->mm, block, printer);
2611bb76ff1Sjsg 	mutex_unlock(&bman->lock);
2621bb76ff1Sjsg }
2631bb76ff1Sjsg 
2645ca02815Sjsg static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
2655ca02815Sjsg 	.alloc = i915_ttm_buddy_man_alloc,
2665ca02815Sjsg 	.free = i915_ttm_buddy_man_free,
2671bb76ff1Sjsg 	.intersects = i915_ttm_buddy_man_intersects,
2681bb76ff1Sjsg 	.compatible = i915_ttm_buddy_man_compatible,
2691bb76ff1Sjsg 	.debug = i915_ttm_buddy_man_debug,
2705ca02815Sjsg };
2715ca02815Sjsg 
2725ca02815Sjsg /**
2735ca02815Sjsg  * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
2745ca02815Sjsg  * @bdev: The ttm device
2755ca02815Sjsg  * @type: Memory type we want to manage
2765ca02815Sjsg  * @use_tt: Set use_tt for the manager
2775ca02815Sjsg  * @size: The size in bytes to manage
2781bb76ff1Sjsg  * @visible_size: The CPU visible size in bytes to manage
2795ca02815Sjsg  * @default_page_size: The default minimum page size in bytes for allocations,
2805ca02815Sjsg  * this must be at least as large as @chunk_size, and can be overridden by
2815ca02815Sjsg  * setting the BO page_alignment, to be larger or smaller as needed.
2825ca02815Sjsg  * @chunk_size: The minimum page size in bytes for our allocations i.e
2835ca02815Sjsg  * order-zero
2845ca02815Sjsg  *
2855ca02815Sjsg  * Note that the starting address is assumed to be zero here, since this
2865ca02815Sjsg  * simplifies keeping the property where allocated blocks having natural
2875ca02815Sjsg  * power-of-two alignment. So long as the real starting address is some large
2885ca02815Sjsg  * power-of-two, or naturally start from zero, then this should be fine.  Also
2895ca02815Sjsg  * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
2905ca02815Sjsg  * if say there is some unusable range from the start of the region. We can
2915ca02815Sjsg  * revisit this in the future and make the interface accept an actual starting
2925ca02815Sjsg  * offset and let it take care of the rest.
2935ca02815Sjsg  *
2945ca02815Sjsg  * Note that if the @size is not aligned to the @chunk_size then we perform the
2955ca02815Sjsg  * required rounding to get the usable size. The final size in pages can be
2965ca02815Sjsg  * taken from &ttm_resource_manager.size.
2975ca02815Sjsg  *
2985ca02815Sjsg  * Return: 0 on success, negative error code on failure.
2995ca02815Sjsg  */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 visible_size, u64 default_page_size,
			    u64 chunk_size)
{
	struct i915_ttm_buddy_manager *bman;
	struct ttm_resource_manager *man;
	int ret;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	/* The buddy allocator rounds size down to a chunk_size multiple. */
	ret = drm_buddy_init(&bman->mm, size, chunk_size);
	if (ret)
		goto err_free_bman;

	rw_init(&bman->lock, "bmlk");
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;
	/* visible_* counters are tracked in pages, not bytes. */
	bman->visible_size = visible_size >> PAGE_SHIFT;
	bman->visible_avail = bman->visible_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	/* Publish the manager only once it is fully set up. */
	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return ret;
}
3385ca02815Sjsg 
3395ca02815Sjsg /**
3405ca02815Sjsg  * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
3415ca02815Sjsg  * @bdev: The ttm device
3425ca02815Sjsg  * @type: Memory type we want to manage
3435ca02815Sjsg  *
3445ca02815Sjsg  * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
3455ca02815Sjsg  * also be freed for us here.
3465ca02815Sjsg  *
3475ca02815Sjsg  * Return: 0 on success, negative error code on failure.
3485ca02815Sjsg  */
i915_ttm_buddy_man_fini(struct ttm_device * bdev,unsigned int type)3495ca02815Sjsg int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
3505ca02815Sjsg {
3515ca02815Sjsg 	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
3525ca02815Sjsg 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
3531bb76ff1Sjsg 	struct drm_buddy *mm = &bman->mm;
3545ca02815Sjsg 	int ret;
3555ca02815Sjsg 
3565ca02815Sjsg 	ttm_resource_manager_set_used(man, false);
3575ca02815Sjsg 
3585ca02815Sjsg 	ret = ttm_resource_manager_evict_all(bdev, man);
3595ca02815Sjsg 	if (ret)
3605ca02815Sjsg 		return ret;
3615ca02815Sjsg 
3625ca02815Sjsg 	ttm_set_driver_manager(bdev, type, NULL);
3635ca02815Sjsg 
3645ca02815Sjsg 	mutex_lock(&bman->lock);
3651bb76ff1Sjsg 	drm_buddy_free_list(mm, &bman->reserved);
3661bb76ff1Sjsg 	drm_buddy_fini(mm);
3671bb76ff1Sjsg 	bman->visible_avail += bman->visible_reserved;
3681bb76ff1Sjsg 	WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
3695ca02815Sjsg 	mutex_unlock(&bman->lock);
3705ca02815Sjsg 
3715ca02815Sjsg 	ttm_resource_manager_cleanup(man);
3725ca02815Sjsg 	kfree(bman);
3735ca02815Sjsg 
3745ca02815Sjsg 	return 0;
3755ca02815Sjsg }
3765ca02815Sjsg 
3775ca02815Sjsg /**
3785ca02815Sjsg  * i915_ttm_buddy_man_reserve - Reserve address range
3795ca02815Sjsg  * @man: The buddy allocator ttm manager
3805ca02815Sjsg  * @start: The offset in bytes, where the region start is assumed to be zero
3815ca02815Sjsg  * @size: The size in bytes
3825ca02815Sjsg  *
3835ca02815Sjsg  * Note that the starting address for the region is always assumed to be zero.
3845ca02815Sjsg  *
3855ca02815Sjsg  * Return: 0 on success, negative error code on failure.
3865ca02815Sjsg  */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	unsigned long fpfn = start >> PAGE_SHIFT;
	unsigned long flags = 0;
	int ret;

	/* Reservations are always at an exact offset, so range-allocate. */
	flags |= DRM_BUDDY_RANGE_ALLOCATION;

	mutex_lock(&bman->lock);
	ret = drm_buddy_alloc_blocks(mm, start,
				     start + size,
				     size, mm->chunk_size,
				     &bman->reserved,
				     flags);

	/*
	 * Only account the CPU-visible pages when the reservation actually
	 * succeeded. Doing it unconditionally would let a failed reserve
	 * permanently shrink visible_avail (and inflate visible_reserved),
	 * skewing the mappable budget and the WARN in
	 * i915_ttm_buddy_man_fini().
	 */
	if (!ret && fpfn < bman->visible_size) {
		unsigned long lpfn = fpfn + (size >> PAGE_SHIFT);
		unsigned long visible = min(lpfn, bman->visible_size) - fpfn;

		bman->visible_reserved += visible;
		bman->visible_avail -= visible;
	}
	mutex_unlock(&bman->lock);

	return ret;
}
4165ca02815Sjsg 
4171bb76ff1Sjsg /**
4181bb76ff1Sjsg  * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion
4191bb76ff1Sjsg  * in pages.
4201bb76ff1Sjsg  * @man: The buddy allocator ttm manager
4211bb76ff1Sjsg  */
i915_ttm_buddy_man_visible_size(struct ttm_resource_manager * man)4221bb76ff1Sjsg u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man)
4231bb76ff1Sjsg {
4241bb76ff1Sjsg 	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
4251bb76ff1Sjsg 
4261bb76ff1Sjsg 	return bman->visible_size;
4271bb76ff1Sjsg }
4281bb76ff1Sjsg 
4291bb76ff1Sjsg /**
4301bb76ff1Sjsg  * i915_ttm_buddy_man_avail - Query the avail tracking for the manager.
4311bb76ff1Sjsg  *
4321bb76ff1Sjsg  * @man: The buddy allocator ttm manager
4331bb76ff1Sjsg  * @avail: The total available memory in pages for the entire manager.
4341bb76ff1Sjsg  * @visible_avail: The total available memory in pages for the CPU visible
4351bb76ff1Sjsg  * portion. Note that this will always give the same value as @avail on
4361bb76ff1Sjsg  * configurations that don't have a small BAR.
4371bb76ff1Sjsg  */
void i915_ttm_buddy_man_avail(struct ttm_resource_manager *man,
			      u64 *avail, u64 *visible_avail)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	/* Read both counters under the lock for one consistent snapshot. */
	mutex_lock(&bman->lock);
	*avail = bman->mm.avail >> PAGE_SHIFT;
	*visible_avail = bman->visible_avail;
	mutex_unlock(&bman->lock);
}
4481bb76ff1Sjsg 
4491bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
/* Selftest-only hook: override the CPU-visible span (in pages). */
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
					   u64 size)
{
	to_buddy_manager(man)->visible_size = size;
}
4571bb76ff1Sjsg #endif
458