// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

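/*
 * Allocate a job along with all of its fixed-size arrays in a single
 * allocation. The memory is carved up in declaration order:
 *
 *   struct host1x_job
 *   struct host1x_reloc          [num_relocs]
 *   struct host1x_job_unpin_data [num_unpins]
 *   struct host1x_job_gather     [num_cmdbufs]
 *   dma_addr_t                   [num_unpins]
 *
 * With the job firewall enabled, command buffers are copied rather than
 * pinned, so no unpin entries are reserved for them.
 */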
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_relocs;
	u64 total;
	void *mem;

	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		num_unpins += num_cmdbufs;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Carve the single allocation up into the individual arrays */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

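/* kref release callback, invoked when the last reference is dropped */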
static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	if (job->syncpt)
		host1x_syncpt_put(job->syncpt);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

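/*
 * Resolve and pin every buffer the job references: first the relocation
 * targets on behalf of the client device, then (unless the job firewall
 * will copy them instead) the gather command buffers on behalf of host1x
 * itself. Each pinned buffer gets an entry in the unpin list so that
 * host1x_job_unpin() can undo the work on failure or completion.
 */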
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	struct host1x_job_gather *g;
	struct iommu_domain *domain;
	unsigned int i;
	int err;

	domain = iommu_get_domain_for_dev(dev);
	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the client device is not attached to an IOMMU, the
		 * physical address of the buffer object can be used.
		 *
		 * Similarly, when an IOMMU domain is shared between all
		 * host1x clients, the IOVA is already available, so no
		 * need to map the buffer object again.
		 *
		 * XXX Note that this isn't always safe to do because it
		 * relies on an assumption that no cache maintenance is
		 * needed on the buffer objects.
		 */
		if (!domain || client->group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			unsigned long mask = HOST1X_RELOC_READ |
					     HOST1X_RELOC_WRITE;
			enum dma_data_direction dir;

			switch (reloc->flags & mask) {
			case HOST1X_RELOC_READ:
				dir = DMA_TO_DEVICE;
				break;

			case HOST1X_RELOC_WRITE:
				dir = DMA_FROM_DEVICE;
				break;

			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
				dir = DMA_BIDIRECTIONAL;
				break;

			default:
				err = -EINVAL;
				goto unpin;
			}

			err = dma_map_sgtable(dev, sgt, dir, 0);
			if (err)
				goto unpin;

			job->unpins[job->num_unpins].dev = dev;
			job->unpins[job->num_unpins].dir = dir;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	/*
	 * The firewall will copy the gather buffers' contents later, so
	 * there is no need to hold and pin them here.
	 */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		return 0;

	for (i = 0; i < job->num_gathers; i++) {
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		dma_addr_t *phys;
		unsigned int j;

		g = &job->gathers[i];
		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the host1x is not attached to an IOMMU, there is no need
		 * to map the buffer object for the host1x, since the physical
		 * address can simply be used.
		 */
		if (!iommu_get_domain_for_dev(host->dev))
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(host->dev, g->bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto put;
		}

		if (host->domain) {
			for_each_sgtable_sg(sgt, sg, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto put;
			}

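			/*
			 * iommu_map_sgtable() returns the number of bytes
			 * mapped, so a return value of zero means failure.
			 */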
			err = iommu_map_sgtable(host->domain,
					iova_dma_addr(&host->iova, alloc),
					sgt, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto put;
			}

			job->unpins[job->num_unpins].size = gather_size;
			phys_addr = iova_dma_addr(&host->iova, alloc);
		} else if (sgt) {
			err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
			if (err)
				goto put;

			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
			job->unpins[job->num_unpins].dev = host->dev;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->gather_addr_phys[i] = phys_addr;

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

put:
	host1x_bo_put(g->bo);
unpin:
	host1x_job_unpin(job);
	return err;
}

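/*
 * Patch one gather's command buffer: for every relocation that targets
 * this buffer, write the DMA address computed by pin_job() into the
 * word at the recorded offset.
 */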
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

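/*
 * State tracked by the job firewall while it walks a copied command
 * stream: the current class, the register/mask/count decoded from the
 * most recent opcode, and the relocations that are still outstanding.
 */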
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

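/*
 * A masked write carries one data word per bit set in the mask, each
 * targeting register "reg + bit position", so walk the mask bit by bit
 * and validate every register that is written.
 */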
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

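/*
 * Walk one gather's copied command stream word by word, decoding the
 * host1x opcodes (0 = SETCL, 1 = INCR, 2 = NONINCR, 3 = MASK) and
 * checking every register write against the relocation table. Opcodes
 * 4 (IMM) and 14 (EXTEND) carry no data words and pass through
 * unchecked; any other opcode is rejected.
 */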
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 4:
		case 14:
			break;

		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

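/*
 * With the job firewall enabled, gather contents are copied into a
 * host1x-owned bounce buffer and validated there, so that userspace
 * cannot modify the command stream after it has been checked.
 */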
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pools
	 * first, since blocking on the allocation here is a major
	 * performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the non-blocking allocation failed, fall back to a blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

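/*
 * Pin all of the job's buffers, run them through the firewall when it is
 * enabled, and patch the gathers so that they reference final DMA
 * addresses. A typical submission sequence in a client driver looks
 * roughly like the sketch below (error handling omitted; "bo" and
 * "words" stand in for a driver-specific buffer object and its size in
 * words):
 *
 *	job = host1x_job_alloc(channel, 1, 0);
 *	host1x_job_add_gather(job, bo, words, 0);
 *	err = host1x_job_pin(job, client->dev);
 *	err = host1x_job_submit(job);
 *	host1x_job_put(job);
 */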
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

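/*
 * Undo everything pin_job() set up: unmap the IOVA ranges allocated for
 * gathers, unmap and unpin every buffer in the unpin list, and free the
 * firewall's gather copy if one was made.
 */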
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}