xref: /linux/drivers/vfio/iommufd.c (revision 84b9b44b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
4  */
5 #include <linux/vfio.h>
6 #include <linux/iommufd.h>
7 
8 #include "vfio.h"
9 
10 MODULE_IMPORT_NS(IOMMUFD);
11 MODULE_IMPORT_NS(IOMMUFD_VFIO);
12 
/*
 * Bind @vdev to the iommufd context @ictx on behalf of the legacy VFIO
 * group path.  Must be called with the device set lock held.
 *
 * For noiommu devices no binding is done: the caller must have
 * CAP_SYS_RAWIO and must not have set up a VFIO-compat IOAS, since that
 * would imply the user expects IOMMU translation to exist.
 *
 * Returns 0 on success or a negative errno.  The legacy path cannot
 * report the allocated device_id/ioas_id back to userspace, so both are
 * discarded here.
 */
int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	u32 device_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev)) {
		/* noiommu bypasses the IOMMU entirely; gate it on rawio */
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		/*
		 * Require no compat ioas to be assigned to proceed. The basic
		 * statement is that the user cannot have done something that
		 * implies they expected translation to exist
		 */
		if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
			return -EPERM;
		return 0;
	}

	ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
	if (ret)
		return ret;

	/* Attach the freshly bound device to the context's compat IOAS */
	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		goto err_unbind;
	ret = vdev->ops->attach_ioas(vdev, &ioas_id);
	if (ret)
		goto err_unbind;

	/*
	 * The legacy path has no way to return the device id or the selected
	 * pt_id
	 */
	return 0;

err_unbind:
	/* Unwind the successful bind_iommufd above */
	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
	return ret;
}
57 
58 void vfio_iommufd_unbind(struct vfio_device *vdev)
59 {
60 	lockdep_assert_held(&vdev->dev_set->lock);
61 
62 	if (vfio_device_is_noiommu(vdev))
63 		return;
64 
65 	if (vdev->ops->unbind_iommufd)
66 		vdev->ops->unbind_iommufd(vdev);
67 }
68 
69 /*
70  * The physical standard ops mean that the iommufd_device is bound to the
71  * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
72  * using this ops set should call vfio_register_group_dev()
73  */
74 int vfio_iommufd_physical_bind(struct vfio_device *vdev,
75 			       struct iommufd_ctx *ictx, u32 *out_device_id)
76 {
77 	struct iommufd_device *idev;
78 
79 	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
80 	if (IS_ERR(idev))
81 		return PTR_ERR(idev);
82 	vdev->iommufd_device = idev;
83 	return 0;
84 }
85 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
86 
/*
 * Undo vfio_iommufd_physical_bind(): detach from any attached IOAS
 * first, then unbind and drop the iommufd_device.  Must hold the device
 * set lock.
 */
void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		/* Detach happens before unbind, mirroring attach-after-bind */
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	/* Clear the stale handle so later use is an obvious NULL deref */
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
99 
100 int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
101 {
102 	int rc;
103 
104 	rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
105 	if (rc)
106 		return rc;
107 	vdev->iommufd_attached = true;
108 	return 0;
109 }
110 EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
111 
112 /*
113  * The emulated standard ops mean that vfio_device is going to use the
114  * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
115  * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
116  * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
117  */
118 
119 static void vfio_emulated_unmap(void *data, unsigned long iova,
120 				unsigned long length)
121 {
122 	struct vfio_device *vdev = data;
123 
124 	if (vdev->ops->dma_unmap)
125 		vdev->ops->dma_unmap(vdev, iova, length);
126 }
127 
/*
 * Access ops for emulated devices: page pinning is required, and unmap
 * notifications are routed to the driver through vfio_emulated_unmap().
 */
static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};
132 
133 int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
134 			       struct iommufd_ctx *ictx, u32 *out_device_id)
135 {
136 	struct iommufd_access *user;
137 
138 	lockdep_assert_held(&vdev->dev_set->lock);
139 
140 	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
141 	if (IS_ERR(user))
142 		return PTR_ERR(user);
143 	vdev->iommufd_access = user;
144 	return 0;
145 }
146 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
147 
/*
 * Undo vfio_iommufd_emulated_bind(): destroy the iommufd_access, if one
 * was created.  Must hold the device set lock.
 */
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		/*
		 * NOTE(review): the attached flag is cleared here rather than
		 * via an explicit detach — presumably destroying the access
		 * tears down any attachment; confirm against iommufd.
		 */
		vdev->iommufd_attached = false;
		vdev->iommufd_access = NULL;
	}
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
159 
160 int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
161 {
162 	int rc;
163 
164 	lockdep_assert_held(&vdev->dev_set->lock);
165 
166 	if (vdev->iommufd_attached)
167 		return -EBUSY;
168 	rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
169 	if (rc)
170 		return rc;
171 	vdev->iommufd_attached = true;
172 	return 0;
173 }
174 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
175