// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <linux/shmem_fs.h>

#include <asm/set_memory.h>

#include "psb_drv.h"

/*
 *	GTT resource allocator - manage page mappings in GTT space
 */

/**
 *	psb_gtt_mask_pte	-	generate GTT pte entry
 *	@pfn: page number to encode
 *	@type: type of memory in the GTT
 *
 *	Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/*
	 * Ensure we explode rather than put an invalid low mapping of
	 * a high mapping page into the GTT.
	 */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
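
/*
 * Example (illustrative values only): psb_gtt_mask_pte(0x1000,
 * PSB_MMU_CACHED_MEMORY) returns
 * (0x1000 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED.
 */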

/**
 *	psb_gtt_entry		-	find the GTT entries for a gtt_range
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Given a gtt_range object, return a pointer to the first GTT page
 *	table entry covering this range.
 */
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

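	/* One 32-bit PTE per page: index the ioremapped table by page offset */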
	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 *	psb_gtt_insert	-	put an object into the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *	@resume: true if we are restoring an existing mapping on resume
 *
 *	Take our preallocated GTT range and insert the GEM object into
 *	the GTT. This is protected via the gtt mutex which the caller
 *	must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
			  int resume)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	struct page **pages;
	int i;

	if (WARN_ON(!r->pages))
		return -EINVAL;

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

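	/*
	 * On resume the pages were already set write-combined before
	 * suspend, so only change the cache attribute on first insert.
	 */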
	if (!resume) {
		/* Set the pages write-combined so CPU writes are uncached
		   and immediately visible to the GPU */
		set_pages_array_wc(pages, r->npage);
	}

	/* Write our page entries into the GTT itself */
	for (i = 0; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}

	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 *	psb_gtt_remove	-	remove an object from the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Remove a preallocated GTT range from the GTT. Overwrite all the
 *	page table entries with the scratch page. This is protected via the
 *	gtt mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
			       PSB_MMU_CACHED_MEMORY);

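	/* Point every entry in the range at the harmless scratch page */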
	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 *	psb_gtt_attach_pages	-	attach and pin GEM pages
 *	@gt: the gtt range
 *
 *	Pin and build an in-kernel list of the pages that back our GEM object.
 *	While we hold this the pages cannot be swapped out. This is protected
 *	via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct page **pages;

	WARN_ON(gt->pages);

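	/* drm_gem_get_pages() pins the shmem backing pages in memory */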
	pages = drm_gem_get_pages(&gt->gem);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->npage = gt->gem.size / PAGE_SIZE;
	gt->pages = pages;

	return 0;
}

/**
 *	psb_gtt_detach_pages	-	unpin and drop GEM pages
 *	@gt: the gtt range
 *
 *	Undo the effect of psb_gtt_attach_pages. At this point the pages
 *	must have been removed from the GTT as they could now be paged out
 *	and change bus address. This is protected via the gtt mutex which the
 *	caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}

/**
 *	psb_gtt_pin		-	pin pages into the GTT
 *	@gt: range to pin
 *
 *	Pin a set of pages into the GTT. The pins are refcounted so that
 *	multiple pins need multiple unpins to undo.
 *
 *	Non GEM backed objects treat this as a no-op as they are always
 *	GTT backed.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

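	/*
	 * First pin of a GEM object: attach the backing pages, write them
	 * into the GTT and mirror the mapping in the GPU MMU.
	 */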
	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     gt->pages, (gpu_base + gt->offset),
				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

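/*
 * A sketch of the expected caller pattern (illustrative, not a real
 * call site in this file):
 *
 *	ret = psb_gtt_pin(gt);
 *	if (ret)
 *		return ret;
 *	... program the hardware using gt->offset ...
 *	psb_gtt_unpin(gt);
 */
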
/**
 *	psb_gtt_unpin		-	Drop a GTT pin requirement
 *	@gt: range to unpin
 *
 *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *	will be removed from the GTT, which also drops the page references
 *	and allows the VM to page the memory out again.
 *
 *	Non GEM backed objects treat this as a no-op as they are always
 *	GTT backed.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
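	/* Last pin gone: tear down the MMU and GTT mappings, release pages */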
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

	mutex_unlock(&dev_priv->gtt_mutex);
}

/*
 *	GTT resource allocator - allocate and manage GTT address space
 */

/**
 *	psb_gtt_alloc_range	-	allocate GTT address space
 *	@dev: our DRM device
 *	@len: length (bytes) of address space required
 *	@name: resource name
 *	@backed: resource should be backed by stolen pages
 *	@align: requested alignment
 *
 *	Ask the kernel core to find us a suitable range of addresses
 *	to use for a GTT mapping.
 *
 *	Returns a gtt_range structure describing the object, or NULL on
 *	error. On successful return the resource is both allocated and marked
 *	as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
				      const char *name, int backed, u32 align)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, align, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}
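
/*
 * A minimal usage sketch (illustrative; the parameter values are
 * assumptions, not taken from a real caller):
 *
 *	struct gtt_range *gt;
 *
 *	gt = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
 *	if (gt == NULL)
 *		return -ENOSPC;
 */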

/**
 *	psb_gtt_free_range	-	release GTT address space
 *	@dev: our DRM device
 *	@gt: a mapping created with psb_gtt_alloc_range
 *
 *	Release a resource that was allocated with psb_gtt_alloc_range. If the
 *	object is still pinned for mmap we drop that pin here before freeing.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
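	/* Read back to post the enable write */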
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 *	The video mmu has a hw bug when accessing 0x0D0000000.
	 *	Make the GATT start at 0xE0000000 instead. This doesn't
	 *	actually matter for us but may do if the video acceleration
	 *	ever gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* CDV doesn't report this, in which case the system has 64 GTT pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV systems. Fudge it in this case.
		   We really don't care what imaginary space is being allocated
		   at this point */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

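	/*
	 * Stolen memory runs from the BSM base up to the start of the GTT
	 * page table itself; one page appears to be held back, hence the
	 * PAGE_SIZE subtraction below.
	 */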
	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
			dev_priv->stolen_base, vram_stolen_size / 1024);

	if (resume && (gtt_pages != pg->gtt_pages ||
		       stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 *	Map the GTT and the stolen memory area
	 */
	if (!resume)
		dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

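	/* Posting read: make sure all the PTE writes have reached the table */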
	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}

int psb_gtt_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct resource *r = dev_priv->gtt_mem->child;
	struct gtt_range *range;
	unsigned int restored = 0, total = 0, size = 0;

	/* On resume, the gtt_mutex is already initialized */
	mutex_lock(&dev_priv->gtt_mutex);
	psb_gtt_init(dev, 1);

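	/* Every allocated range is a child resource of gtt_mem; walk them */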
	while (r != NULL) {
		range = container_of(r, struct gtt_range, resource);
		if (range->pages) {
			psb_gtt_insert(dev, range, 1);
			size += resource_size(&range->resource);
			restored++;
		}
		r = r->sibling;
		total++;
	}
	mutex_unlock(&dev_priv->gtt_mutex);
	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)\n", restored,
			 total, (size / 1024));

	return 0;
}