xref: /linux/drivers/gpu/host1x/dev.c (revision c6fbb759)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"

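/*
 * Register-access helpers. A note on the apertures, inferred from the probe
 * code below: on SoCs with a hypervisor register split (Tegra186 and later),
 * host->regs maps the "vm" aperture and host->hv_regs the "hypervisor"
 * aperture; on earlier SoCs there is a single aperture and the syncpoint
 * registers live at info->sync_offset within it.
 */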
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC1 */
		.base = 0x1bc0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{
		/* VIC channel */
		.base = 0x17b8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* VIC MMIO */
		.base = 0x1688,
		.offset = 0x34,
		.limit = 0x34
	},
};

static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true,
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

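/*
 * For reference, a rough sketch of a device tree node this table would match
 * on. This is illustrative only, not copied from any in-tree DT; the unit
 * address, interrupt numbers and clock/reset specifiers vary per SoC, and the
 * nvidia,tegra*-host1x bindings are the authoritative contract:
 *
 *	host1x@50000000 {
 *		compatible = "nvidia,tegra210-host1x";
 *		reg = <0x0 0x50000000 0x0 0x34000>;
 *		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
 *		interrupt-names = "syncpt", "host1x";
 *		clocks = <&tegra_car TEGRA210_CLK_HOST1X>;
 *		resets = <&tegra_car 28>;
 *		reset-names = "host1x";
 *	};
 */
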
static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow all VMs access to all stream IDs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow all VMs access to all classes. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as the originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}
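
/*
 * As a concrete trace of the first loop above: for the Tegra186 VIC entry
 * (base 0x1af0, offset 0x30, limit 0x34) the two writes become
 *
 *	host1x_hypervisor_writel(host, 0x30, 0x1af0);
 *	host1x_hypervisor_writel(host, 0x34, 0x1af4);
 *
 * i.e. the register at base takes the offset and base + 4 takes the limit.
 */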

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, enable the IOMMU by default on Tegra124 and later. On
	 * any of the earlier SoCs, only use the IOMMU for additional safety
	 * when the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}
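
/*
 * Worked through against the info tables above: Tegra20/30/114 (32-bit DMA
 * mask) want the IOMMU only when the host1x firewall is disabled, while
 * Tegra124 and later (34- or 40-bit masks) always return true here.
 */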

static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
				to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

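		/*
		 * Use the smallest page size the IOMMU domain supports (the
		 * lowest set bit of pgsize_bitmap) as the IOVA granularity.
		 */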
		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}
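
/*
 * Concretely: on Tegra124/Tegra210 (34-bit DMA mask but no wide GATHER
 * support) running without an IOMMU, the mask is lowered to 32 bits above so
 * that push buffers always end up where the GATHER opcode can address them.
 */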

static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

static int host1x_get_resets(struct host1x *host)
{
	int err;

	host->resets[0].id = "mc";
	host->resets[1].id = "host1x";
	host->nresets = ARRAY_SIZE(host->resets);

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				host->dev, host->nresets, host->resets);
	if (err) {
		dev_err(host->dev, "failed to get reset: %d\n", err);
		return err;
	}

	return 0;
}
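
/*
 * Note that the resets are requested in "released" state and are only
 * acquired around the runtime-PM paths below; presumably this lets other
 * exclusive users control the same reset lines while host1x is suspended.
 */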

static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

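	/*
	 * info->init() installs the per-SoC implementation, selected via the
	 * hw/host1x0*.h headers included at the top of this file.
	 */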
	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;

	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* the driver's code isn't ready yet for dynamic runtime PM */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}

static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);

	return 0;
}

static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	err = reset_control_bulk_assert(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		goto resume_host1x;
	}

	usleep_range(1000, 2000);

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}

static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	/* TODO: add system suspend-resume once the driver is ready for it */
};

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the DMA mask that the host1x hardware supports,
 * which can differ from the mask actually applied to the device: for
 * example, without an IOMMU, SoCs that lack wide GATHER support fall back
 * to a 32-bit mask (see host1x_iommu_init()).
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
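
/*
 * A hedged usage sketch from the client side; "client_dev" and "host1x"
 * below are illustrative names for whatever the caller has at hand:
 *
 *	err = dma_set_mask_and_coherent(client_dev,
 *					host1x_get_dma_mask(host1x));
 *	if (err < 0)
 *		return err;
 */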

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");