/* $NetBSD: intel_engine_pool.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_engine_pool.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"

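/* Map a pool back to the engine that embeds it. */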
static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
{
	return container_of(pool, struct intel_engine_cs, pool);
}

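/*
 * Select the free-list bucket for a request of sz bytes. For example,
 * with 4KiB pages a 3-page request gives fls(3) - 1 == 1, i.e. the
 * 2-page bucket; the lookup in intel_engine_get_pool() then skips any
 * cached node that is still too small for the request.
 */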
static struct list_head *
bucket_for_size(struct intel_engine_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

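/* Release a node's backing object and active tracker, then free it. */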
static void node_free(struct intel_engine_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree(node);
}

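/*
 * First-use callback for a node's i915_active: discard any stale
 * exclusive fence left by a previous user (best effort, only if the
 * reservation can be trylocked), pin the backing pages, and hide the
 * object from the shrinker until the node is retired.
 */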
static int pool_active(struct i915_active *ref)
{
	struct intel_engine_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}

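/*
 * Last-use callback: unpin the backing pages, mark the object
 * purgeable so the shrinker may reclaim it while cached, and return
 * the node to its size bucket for reuse.
 */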
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_engine_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_engine_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	spin_lock_irqsave(&pool->lock, flags);
	list_add(&node->link, list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

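/*
 * Allocate a fresh node and its backing object when no suitably sized
 * node is cached. Allocation failures here are expected and are
 * reported to the caller as ERR_PTR values.
 */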
static struct intel_engine_pool_node *
node_create(struct intel_engine_pool *pool, size_t sz)
{
	struct intel_engine_cs *engine = to_engine(pool);
	struct intel_engine_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(engine->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}

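/*
 * Virtual engines have no pool of their own; they borrow the pool of
 * their first physical sibling.
 */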
static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
{
	if (intel_engine_is_virtual(engine))
		engine = intel_virtual_engine_get_sibling(engine, 0);

	GEM_BUG_ON(!engine);
	return &engine->pool;
}

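/*
 * Grab a cached node of at least size bytes, creating one on demand.
 * The engine must be awake (pm-wise) for as long as the node is in
 * use. A sketch of the expected calling pattern, assuming the
 * intel_engine_pool_put() helper declared in intel_engine_pool.h:
 *
 *	node = intel_engine_get_pool(engine, size);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	... use node->obj ...
 *	intel_engine_pool_put(node);
 */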
struct intel_engine_pool_node *
intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
{
	struct intel_engine_pool *pool = lookup_pool(engine);
	struct intel_engine_pool_node *node;
	struct list_head *list;
	unsigned long flags;
	int ret;

	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(node, list, link) {
		if (node->obj->base.size < size)
			continue;
		list_del(&node->link);
		break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

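/* Initialise the pool's lock and its (initially empty) size buckets. */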
void intel_engine_pool_init(struct intel_engine_pool *pool)
{
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

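/*
 * Called when the engine is parked: with no users left, free every
 * cached node so the backing objects do not linger while the engine
 * idles.
 */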
void intel_engine_pool_park(struct intel_engine_pool *pool)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];
		struct intel_engine_pool_node *node, *nn;

		list_for_each_entry_safe(node, nn, list, link)
			node_free(node);

		INIT_LIST_HEAD(list);
	}
}

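/* Sanity check at teardown: every bucket must already be empty. */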
void intel_engine_pool_fini(struct intel_engine_pool *pool)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}