Lines matching refs:lmtt
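The matches below are fragments of the Xe driver's LMTT code (apparently drivers/gpu/drm/xe/xe_lmtt.c). The LMTT, or Local Memory Translation Table, is a page-table-like structure kept in VRAM by the SR-IOV PF so the hardware can translate VF local-memory addresses. Only matching source lines were captured; elided lines are marked /* ... */ below.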
#define lmtt_assert(lmtt, condition)    xe_tile_assert(lmtt_to_tile(lmtt), condition)
#define lmtt_debug(lmtt, msg...)        xe_sriov_dbg_verbose(lmtt_to_xe(lmtt), "LMTT: " msg)
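/*
 * The PF's LMTT is embedded in struct xe_tile, so the owning tile (and from
 * it, the device) can be recovered from the lmtt pointer alone.
 */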
static struct xe_tile *lmtt_to_tile(struct xe_lmtt *lmtt)
{
        return container_of(lmtt, struct xe_tile, sriov.pf.lmtt);
}

static struct xe_device *lmtt_to_xe(struct xe_lmtt *lmtt)
{
        return tile_to_xe(lmtt_to_tile(lmtt));
}
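/* Smallest LMTT page size, derived from the leaf (level 0) PTE shift. */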
static u64 lmtt_page_size(struct xe_lmtt *lmtt)
{
        return BIT_ULL(lmtt->ops->lmtt_pte_shift(0));
}
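/*
 * Allocate one page-table node. Leaf (level 0) tables track no children, so
 * their shadow entries[] array is skipped; the backing BO is sized for
 * lmtt_pte_num(level) entries and must end up in VRAM on discrete parts.
 */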
static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level)
{
        unsigned int num_entries = level ? lmtt->ops->lmtt_pte_num(level) : 0;
        /* ... */

        bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
                                  PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
                                             lmtt->ops->lmtt_pte_num(level)),
                                  /* ... */
                                  XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
                                  /* ... */);
        /* ... */
        lmtt_assert(lmtt, xe_bo_is_vram(bo));
        /* ... */
}
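/*
 * Root page-directory lifecycle: lmtt_init_pd() allocates the PD at the root
 * level reported by the ops; lmtt_fini_pd() asserts that every VF entry was
 * already dropped before the PD is released.
 */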
static int lmtt_init_pd(struct xe_lmtt *lmtt)
{
        struct xe_lmtt_pt *pd;

        lmtt_assert(lmtt, !lmtt->pd);
        lmtt_assert(lmtt, lmtt->ops->lmtt_root_pd_level());

        pd = lmtt_pt_alloc(lmtt, lmtt->ops->lmtt_root_pd_level());
        /* ... */

        lmtt->pd = pd;
        /* ... */
}
static void lmtt_fini_pd(struct xe_lmtt *lmtt)
{
        struct xe_lmtt_pt *pd = lmtt->pd;
        unsigned int num_entries = lmtt->ops->lmtt_pte_num(pd->level);
        unsigned int n;

        for (n = 0; n < num_entries; n++)
                lmtt_assert(lmtt, !pd->entries[n]);

        lmtt->pd = NULL;
        /* ... */
}
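/*
 * Managed cleanup action registered in xe_lmtt_init(); ops and pd are either
 * both set or both unset, which the XOR assert below enforces.
 */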
static void fini_lmtt(struct drm_device *drm, void *arg)
{
        struct xe_lmtt *lmtt = arg;

        lmtt_assert(lmtt, !(!!lmtt->ops ^ !!lmtt->pd));

        if (!lmtt->pd)
                return;

        lmtt_fini_pd(lmtt);
        lmtt->ops = NULL;
}
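/*
 * xe_lmtt_init() selects the multi-level or two-level PTE ops for the
 * platform, allocates the root PD, and registers fini_lmtt() as a managed
 * cleanup action; programming the hardware is deferred to xe_lmtt_init_hw().
 */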
int xe_lmtt_init(struct xe_lmtt *lmtt)
{
        struct xe_device *xe = lmtt_to_xe(lmtt);
        int err;

        lmtt_assert(lmtt, IS_SRIOV_PF(xe));
        lmtt_assert(lmtt, !lmtt->ops);
        /* ... */

        if (/* ... multi-level LMTT? ... */)
                lmtt->ops = &lmtt_ml_ops;
        else
                lmtt->ops = &lmtt_2l_ops;

        err = lmtt_init_pd(lmtt);
        /* ... */

        return drmm_add_action_or_reset(&xe->drm, fini_lmtt, lmtt);

        /* ... error path: */
        lmtt->ops = NULL;
        /* ... */
}
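/*
 * The hardware is pointed at the root PD: it must reside in VRAM and its
 * offset must be 64K aligned.
 */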
static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
{
        struct xe_tile *tile = lmtt_to_tile(lmtt);
        /* ... */
        dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);

        lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
        lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
        lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
        /* ... */
}
void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
{
        if (!lmtt->pd)
                return;

        lmtt_setup_dir_ptr(lmtt);
}
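/*
 * PTE writes go directly to the table's BO mapping; the entry width (32 or
 * 64 bit) is dictated by the per-level lmtt_pte_size() op.
 */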
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
                           u64 pte, unsigned int idx)
{
        unsigned int level = pt->level;

        lmtt_assert(lmtt, idx <= lmtt->ops->lmtt_pte_num(level));
        lmtt_debug(lmtt, "WRITE level=%u index=%u pte=%#llx\n", level, idx, pte);

        switch (lmtt->ops->lmtt_pte_size(level)) {
        case sizeof(u32):
                xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
                break;
        case sizeof(u64):
                xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
                break;
        default:
                lmtt_assert(lmtt, !!!"invalid pte size");
        }
}
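/*
 * Teardown: lmtt_destroy_pt() recursively frees a subtree, while
 * lmtt_drop_pages() detaches a VF's subtree from the root PD, invalidates
 * the VF's PDE, and then destroys the subtree.
 */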
static void lmtt_destroy_pt(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd)
{
        unsigned int num_entries = pd->level ? lmtt->ops->lmtt_pte_num(pd->level) : 0;

        /* ... for each non-NULL pt in pd->entries[0..num_entries): */
                        lmtt_destroy_pt(lmtt, pt);
        /* ... */
}
static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
{
        struct xe_lmtt_pt *pd = lmtt->pd;
        /* ... detach pt from pd->entries[vfid], bail out if none ... */

        lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);

        lmtt_assert(lmtt, pd->level > 0);
        lmtt_assert(lmtt, pt->level == pd->level - 1);
        lmtt_destroy_pt(lmtt, pt);
}
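/*
 * Range allocation: lmtt_alloc_range() plants a per-VF subtree under the
 * root PD, and __lmtt_alloc_range() recursively allocates lower-level
 * tables until leaf tables cover the whole [start, end) range.
 */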
static int __lmtt_alloc_range(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd,
                              u64 start, u64 end)
{
        u64 pte_addr_shift = BIT_ULL(lmtt->ops->lmtt_pte_shift(pd->level));
        /* ... */

        lmtt_assert(lmtt, pd->level > 0);

        /* ... for each pte_addr_shift-sized chunk [offset, next) of [start, end): */
                pt = lmtt_pt_alloc(lmtt, pd->level - 1);
                /* ... */

                idx = lmtt->ops->lmtt_pte_index(offset, pd->level);
                pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);

                lmtt_write_pte(lmtt, pd, pde, idx);
                /* ... */
                err = __lmtt_alloc_range(lmtt, pt, offset, next);
                /* ... */
}
static int lmtt_alloc_range(struct xe_lmtt *lmtt, unsigned int vfid, u64 start, u64 end)
{
        struct xe_lmtt_pt *pd = lmtt->pd;
        /* ... */

        lmtt_assert(lmtt, pd->level > 0);
        lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
        lmtt_assert(lmtt, IS_ALIGNED(start, lmtt_page_size(lmtt)));
        lmtt_assert(lmtt, IS_ALIGNED(end, lmtt_page_size(lmtt)));
        /* ... */

        pt = lmtt_pt_alloc(lmtt, pd->level - 1);
        /* ... */
        pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);

        lmtt_write_pte(lmtt, pd, pde, vfid);
        /* ... */
        err = __lmtt_alloc_range(lmtt, pt, start, end);
        /* ... */
}
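/*
 * Walk from the VF's subtree down to the level 0 (leaf) table that covers
 * addr, shifting addr as each level is consumed.
 */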
static struct xe_lmtt_pt *lmtt_leaf_pt(struct xe_lmtt *lmtt, unsigned int vfid, u64 addr)
{
        struct xe_lmtt_pt *pd = lmtt->pd;
        /* ... */

        lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));

        /* ... descend from pd->entries[vfid] while pt->level > 0: */
                lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
                            lmtt->ops->lmtt_pte_num(pt->level));

                pt = pt->entries[lmtt->ops->lmtt_pte_index(addr, pt->level)];

                addr >>= lmtt->ops->lmtt_pte_shift(pt->level);
        /* ... */

        lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
                    lmtt->ops->lmtt_pte_num(pt->level));
        lmtt_assert(lmtt, pt->level != pd->level);
        lmtt_assert(lmtt, pt->level == 0);
        return pt;
}
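/*
 * Map a VRAM BO into a VF's LMTT: the GPU address and the BO size must both
 * be aligned to the leaf page size, and each block gets a level 0 PTE.
 */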
static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 start)
{
        u64 page_size = lmtt_page_size(lmtt);
        /* ... */

        lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
        lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size));
        lmtt_assert(lmtt, xe_bo_is_vram(bo));

        /* ... for each page_size block at VRAM address addr: */
                pt = lmtt_leaf_pt(lmtt, vfid, start);

                lmtt_write_pte(lmtt, pt, lmtt->ops->lmtt_pte_encode(addr, 0),
                               lmtt->ops->lmtt_pte_index(start, 0));
                /* ... */
}
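/*
 * Public PF-only API (kernel-doc comments elided). xe_lmtt_prepare_pages()
 * reserves page tables covering [0, range), xe_lmtt_populate_pages() fills
 * leaf PTEs for a BO at the given offset, and xe_lmtt_drop_pages() tears the
 * VF's tables down again. vfid must be non-zero (VFID 0 is reserved for the
 * PF itself).
 */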
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range)
{
        lmtt_assert(lmtt, lmtt->pd);
        lmtt_assert(lmtt, vfid);

        return lmtt_alloc_range(lmtt, vfid, 0, range);
}
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset)
{
        lmtt_assert(lmtt, lmtt->pd);
        lmtt_assert(lmtt, vfid);

        lmtt_insert_bo(lmtt, vfid, bo, offset);
        return 0;
}
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
{
        lmtt_assert(lmtt, lmtt->pd);
        lmtt_assert(lmtt, vfid);

        lmtt_drop_pages(lmtt, vfid);
}
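Taken together, the three calls above suggest the provisioning flow sketched below. This is a minimal sketch, not code from the listing: the helper pf_map_vf_lmem() and its error handling are hypothetical; only the xe_lmtt_*_pages() calls are taken from the fragments above.

/* Hypothetical helper: map a VF's LMEM object, assuming its quota is known. */
static int pf_map_vf_lmem(struct xe_lmtt *lmtt, unsigned int vfid,
                          struct xe_bo *vf_lmem_bo, u64 range)
{
        int err;

        /* Reserve page tables covering VF addresses [0, range). */
        err = xe_lmtt_prepare_pages(lmtt, vfid, range);
        if (err)
                return err;

        /* Write leaf PTEs so the VF's view at offset 0 maps the BO. */
        err = xe_lmtt_populate_pages(lmtt, vfid, vf_lmem_bo, 0);
        if (err)
                xe_lmtt_drop_pages(lmtt, vfid); /* unwind the reservation */

        return err;
}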
u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
{
        unsigned int level = 0;
        u64 pt_size;

        lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
        lmtt_assert(lmtt, IS_DGFX(lmtt_to_xe(lmtt)));
        lmtt_assert(lmtt, lmtt->ops);

        pt_size = PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
                             lmtt->ops->lmtt_pte_num(level));

        while (++level < lmtt->ops->lmtt_root_pd_level()) {
                pt_size *= lmtt->ops->lmtt_pte_index(size, level) + 1;
                pt_size += PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
                                      lmtt->ops->lmtt_pte_num(level));
        }

        return pt_size;
}
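Reading the estimate: each pass through the loop scales the running total by the number of entries the next level needs to cover size (its highest PTE index plus one), then adds one page-aligned table for that level. The root PD is excluded, since the loop stops below lmtt_root_pd_level() and the PD is allocated once in lmtt_init_pd(). With a two-level layout (root PD directly above the leaves) the loop never runs, so the estimate is a single page-aligned leaf table.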