xref: /linux/arch/x86/kernel/pci-dma.c (revision f9a38ea5)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
20a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
3ea8c64acSChristoph Hellwig #include <linux/dma-direct.h>
4c53c47aaSJoerg Roedel #include <linux/iommu.h>
5cb5867a5SGlauber Costa #include <linux/dmar.h>
669c60c88SPaul Gortmaker #include <linux/export.h>
757c8a661SMike Rapoport #include <linux/memblock.h>
85a0e3ad6STejun Heo #include <linux/gfp.h>
9bca5c096SGlauber Costa #include <linux/pci.h>
1078013eaaSChristoph Hellwig #include <linux/amd-iommu.h>
11cb5867a5SGlauber Costa 
12116890d5SGlauber Costa #include <asm/proto.h>
13116890d5SGlauber Costa #include <asm/dma.h>
1446a7fa27SFUJITA Tomonori #include <asm/iommu.h>
151d9b16d1SJoerg Roedel #include <asm/gart.h>
16b4941a9aSIngo Molnar #include <asm/x86_init.h>
1778013eaaSChristoph Hellwig 
1878013eaaSChristoph Hellwig #include <xen/xen.h>
1978013eaaSChristoph Hellwig #include <xen/swiotlb-xen.h>
20459121c9SGlauber Costa 
/* Set by "iommu=usedac": suppress the VIA no-DAC quirk (see via_no_dac()). */
static bool disable_dac_quirk __read_mostly;

/* Global DMA ops; may be overridden (e.g. to xen_swiotlb_dma_ops below). */
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Default the debug knobs on when IOMMU debugging is configured. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Allow merging of scatter-gather entries; toggled by iommu= options. */
int iommu_merge __read_mostly = 0;

/* Set by "iommu=off" to skip all IOMMU/swiotlb initialization. */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
4078013eaaSChristoph Hellwig #ifdef CONFIG_SWIOTLB
4178013eaaSChristoph Hellwig bool x86_swiotlb_enable;
42c6af2aa9SChristoph Hellwig static unsigned int x86_swiotlb_flags;
4378013eaaSChristoph Hellwig 
pci_swiotlb_detect(void)4478013eaaSChristoph Hellwig static void __init pci_swiotlb_detect(void)
4578013eaaSChristoph Hellwig {
4678013eaaSChristoph Hellwig 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
4778013eaaSChristoph Hellwig 	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
4878013eaaSChristoph Hellwig 		x86_swiotlb_enable = true;
4978013eaaSChristoph Hellwig 
5078013eaaSChristoph Hellwig 	/*
5178013eaaSChristoph Hellwig 	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
5278013eaaSChristoph Hellwig 	 * devices that can't support DMA to encrypted memory.
5378013eaaSChristoph Hellwig 	 */
5478013eaaSChristoph Hellwig 	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
5578013eaaSChristoph Hellwig 		x86_swiotlb_enable = true;
5678013eaaSChristoph Hellwig 
57a3e23092SChristoph Hellwig 	/*
58a3e23092SChristoph Hellwig 	 * Guest with guest memory encryption currently perform all DMA through
59a3e23092SChristoph Hellwig 	 * bounce buffers as the hypervisor can't access arbitrary VM memory
60a3e23092SChristoph Hellwig 	 * that is not explicitly shared with it.
61a3e23092SChristoph Hellwig 	 */
62c6af2aa9SChristoph Hellwig 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
6378013eaaSChristoph Hellwig 		x86_swiotlb_enable = true;
64c6af2aa9SChristoph Hellwig 		x86_swiotlb_flags |= SWIOTLB_FORCE;
65c6af2aa9SChristoph Hellwig 	}
6678013eaaSChristoph Hellwig }
6778013eaaSChristoph Hellwig #else
/* Without CONFIG_SWIOTLB there is nothing to detect. */
static inline void __init pci_swiotlb_detect(void)
{
}
71c6af2aa9SChristoph Hellwig #define x86_swiotlb_flags 0
7278013eaaSChristoph Hellwig #endif /* CONFIG_SWIOTLB */
7378013eaaSChristoph Hellwig 
7478013eaaSChristoph Hellwig #ifdef CONFIG_SWIOTLB_XEN
xen_swiotlb_enabled(void)75*f9a38ea5SChristoph Hellwig static bool xen_swiotlb_enabled(void)
76*f9a38ea5SChristoph Hellwig {
77*f9a38ea5SChristoph Hellwig 	return xen_initial_domain() || x86_swiotlb_enable ||
78*f9a38ea5SChristoph Hellwig 		(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
79*f9a38ea5SChristoph Hellwig }
80*f9a38ea5SChristoph Hellwig 
/*
 * Set up the Xen flavour of swiotlb and install the Xen DMA ops.
 * Flags must be in place before swiotlb_init_remap() consumes them.
 */
static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_swiotlb_enabled())
		return;
	x86_swiotlb_enable = true;
	/* SWIOTLB_ANY lifts address restrictions; xen_swiotlb_fixup remaps. */
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}
9278013eaaSChristoph Hellwig #else
/* Without CONFIG_SWIOTLB_XEN there is nothing to initialize. */
static inline void __init pci_xen_swiotlb_init(void)
{
}
9678013eaaSChristoph Hellwig #endif /* CONFIG_SWIOTLB_XEN */
97ee1f284fSKonrad Rzeszutek Wilk 
/*
 * Early DMA setup: Xen PV domains take the Xen swiotlb path and skip
 * native detection entirely; everyone else runs the IOMMU detectors
 * and then sizes/allocates the swiotlb based on what they decided.
 */
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	/* Detectors may adjust x86_swiotlb_enable before swiotlb_init(). */
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}
1100a2b9a6eSMarek Szyprowski 
111fae9a0d8SGlauber Costa /*
112ff61f079SJonathan Corbet  * See <Documentation/arch/x86/x86_64/boot-options.rst> for the iommu kernel
113395cf969SPaul Bolle  * parameter documentation.
114fae9a0d8SGlauber Costa  */
/*
 * Parse the "iommu=" early kernel parameter.  Options are comma
 * separated and matched by prefix, so a single token can trigger
 * several checks (e.g. "forcesac" also matches "force").
 * Returns 0 on success, -EINVAL if no argument string was given,
 * 1 when parsing stops early at "usedac".
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		/* Historical DAC/SAC switches are accepted but ignored. */
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			/* Keep DAC on VIA bridges: skip the via_no_dac() quirk. */
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		/* Let the GART driver inspect the same token as well. */
		gart_parse_options(p);

		/* Advance to the next comma-separated option. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
175fae9a0d8SGlauber Costa 
pci_iommu_init(void)176cb5867a5SGlauber Costa static int __init pci_iommu_init(void)
177cb5867a5SGlauber Costa {
178d07c1be0SFUJITA Tomonori 	x86_init.iommu.iommu_init();
179d07c1be0SFUJITA Tomonori 
18078013eaaSChristoph Hellwig #ifdef CONFIG_SWIOTLB
18178013eaaSChristoph Hellwig 	/* An IOMMU turned us off. */
18278013eaaSChristoph Hellwig 	if (x86_swiotlb_enable) {
18378013eaaSChristoph Hellwig 		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
18478013eaaSChristoph Hellwig 		swiotlb_print_info();
18578013eaaSChristoph Hellwig 	} else {
18678013eaaSChristoph Hellwig 		swiotlb_exit();
187ee1f284fSKonrad Rzeszutek Wilk 	}
18878013eaaSChristoph Hellwig #endif
18975f1cdf1SFUJITA Tomonori 
190cb5867a5SGlauber Costa 	return 0;
191cb5867a5SGlauber Costa }
192cb5867a5SGlauber Costa /* Must execute after PCI subsystem */
1939a821b23SDavid Woodhouse rootfs_initcall(pci_iommu_init);
1943b15e581SFenghua Yu 
1953b15e581SFenghua Yu #ifdef CONFIG_PCI
1963b15e581SFenghua Yu /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
1973b15e581SFenghua Yu 
/* pci_walk_bus() callback: cap one device's DMA addressing at 32 bits. */
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}
2030ead51c3SChristoph Hellwig 
via_no_dac(struct pci_dev * dev)204a18e3690SGreg Kroah-Hartman static void via_no_dac(struct pci_dev *dev)
2053b15e581SFenghua Yu {
2060ead51c3SChristoph Hellwig 	if (!disable_dac_quirk) {
20713bf7576SBjorn Helgaas 		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
2080ead51c3SChristoph Hellwig 		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
2093b15e581SFenghua Yu 	}
2103b15e581SFenghua Yu }
211c484b241SYinghai Lu DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
212c484b241SYinghai Lu 				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
2133b15e581SFenghua Yu #endif
214