// SPDX-License-Identifier: GPL-2.0-or-later
/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/module.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

static LIST_HEAD(virtio_crypto_table);
static uint32_t num_devices;

/* The table_lock protects the above global list and num_devices */
static DEFINE_MUTEX(table_lock);

#define VIRTIO_CRYPTO_MAX_DEVICES 32

/*
 * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
 * framework.
 * @vcrypto_dev:  Pointer to virtio crypto device.
 *
 * Function adds virtio crypto device to the global list.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
		pr_info("virtio_crypto: only support up to %d devices\n",
			VIRTIO_CRYPTO_MAX_DEVICES);
		mutex_unlock(&table_lock);
		return -EFAULT;
	}

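	/* Reject a device that has already been added to the list. */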
	list_for_each(itr, &virtio_crypto_table) {
		struct virtio_crypto *ptr =
				list_entry(itr, struct virtio_crypto, list);

		if (ptr == vcrypto_dev) {
			mutex_unlock(&table_lock);
			return -EEXIST;
		}
	}
	atomic_set(&vcrypto_dev->ref_count, 0);
	list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
	vcrypto_dev->dev_id = num_devices++;
	mutex_unlock(&table_lock);
	return 0;
}

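/*
 * virtcrypto_devmgr_get_head() - Get the list of virtio crypto devices
 *
 * Function returns the head of the global virtio crypto device list,
 * which is protected by table_lock.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to the head of the virtio crypto device list.
 */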
struct list_head *virtcrypto_devmgr_get_head(void)
{
	return &virtio_crypto_table;
}

/*
 * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
 * framework.
 * @vcrypto_dev:  Pointer to virtio crypto device.
 *
 * Function removes virtio crypto device from the acceleration framework.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
{
	mutex_lock(&table_lock);
	list_del(&vcrypto_dev->list);
	num_devices--;
	mutex_unlock(&table_lock);
}

/*
 * virtcrypto_devmgr_get_first()
 *
 * Function returns the first virtio crypto device from the acceleration
 * framework.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_devmgr_get_first(void)
{
	struct virtio_crypto *dev = NULL;

	mutex_lock(&table_lock);
	if (!list_empty(&virtio_crypto_table))
		dev = list_first_entry(&virtio_crypto_table,
				       struct virtio_crypto, list);
	mutex_unlock(&table_lock);
	return dev;
}

/*
 * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
{
	return atomic_read(&vcrypto_dev->ref_count) != 0;
}

/*
 * virtcrypto_dev_get() - Increment vcrypto_dev reference count
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Increment the vcrypto_dev refcount and, if this is the first reference
 * taken on the device, take a reference on the owning module as well.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module reference cannot be
 * taken.
 */
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
{
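	/* First reference (0 -> 1): pin the owning module as well. */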
	if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
		if (!try_module_get(vcrypto_dev->owner))
			return -EFAULT;
	return 0;
}

/*
 * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Decrement the vcrypto_dev refcount and, when the last reference is
 * dropped, release the reference on the owning module as well.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
{
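	/* Last reference dropped: release the owning module reference. */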
	if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
		module_put(vcrypto_dev->owner);
}

/*
 * virtcrypto_dev_started() - Check whether device has started
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise
 */
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
{
	return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
}

/*
 * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
 * @node:  NUMA node id on which to look for a device.
 * @service: Crypto service that needs to be supported by the dev.
 * @algo: The algorithm number that needs to be supported by the dev.
 *
 * Function returns the least-used virtio crypto device on the given node
 * that supports the requested crypto service and algorithm, falling back
 * to any started device that supports them.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
					      uint32_t algo)
{
	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
	unsigned long best = ~0;
	unsigned long ctr;

	mutex_lock(&table_lock);
	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
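		/*
		 * Prefer a started device that sits on the requested NUMA
		 * node (or has no node affinity) and supports the requested
		 * service and algorithm; among those, pick the one with the
		 * lowest reference count, i.e. the least-used device.
		 */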
		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
		    virtcrypto_dev_started(tmp_dev) &&
		    virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				vcrypto_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!vcrypto_dev) {
		pr_info("virtio_crypto: Could not find a device on node %d\n",
			node);
		/* Get any started device */
		list_for_each_entry(tmp_dev,
				virtcrypto_devmgr_get_head(), list) {
			if (virtcrypto_dev_started(tmp_dev) &&
			    virtcrypto_algo_is_supported(tmp_dev,
							 service, algo)) {
				vcrypto_dev = tmp_dev;
				break;
			}
		}
	}
	mutex_unlock(&table_lock);
	if (!vcrypto_dev)
		return NULL;

	virtcrypto_dev_get(vcrypto_dev);
	return vcrypto_dev;
}
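
/*
 * Typical usage (an illustrative sketch, not code taken from a caller of
 * this file): a driver that needs an AES-CBC capable device might do
 *
 *	vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *	... submit requests to vcrypto ...
 *	virtcrypto_dev_put(vcrypto);
 *
 * virtcrypto_get_dev_node() already took a reference through
 * virtcrypto_dev_get(), so only the matching virtcrypto_dev_put() is needed
 * once the caller is done with the device.
 */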

/*
 * virtcrypto_dev_start() - Start virtio crypto device
 * @vcrypto:    Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto device
 * is ready to be used.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 on success, -EFAULT when it fails to register the crypto
 * algorithms.
 */
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
	if (virtio_crypto_skcipher_algs_register(vcrypto)) {
		pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
		return -EFAULT;
	}

	if (virtio_crypto_akcipher_algs_register(vcrypto)) {
		pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
		virtio_crypto_skcipher_algs_unregister(vcrypto);
		return -EFAULT;
	}

	return 0;
}

/*
 * virtcrypto_dev_stop() - Stop virtio crypto device
 * @vcrypto:    Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto device
 * is no longer available for use.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
	virtio_crypto_skcipher_algs_unregister(vcrypto);
	virtio_crypto_akcipher_algs_unregister(vcrypto);
}
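
/*
 * Illustrative pairing of the helpers above (a sketch only; the actual
 * probe/remove flow lives in the virtio crypto core driver):
 *
 *	probe:  virtcrypto_devmgr_add_dev(vcrypto);
 *	        ... device specific setup ...
 *	        virtcrypto_dev_start(vcrypto);
 *
 *	remove: virtcrypto_dev_stop(vcrypto);
 *	        ... device specific teardown ...
 *	        virtcrypto_devmgr_rm_dev(vcrypto);
 */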

/*
 * virtcrypto_algo_is_supported()
 * @vcrypto: Pointer to virtio crypto device.
 * @service: The bit number of the service to validate.
 *	      See VIRTIO_CRYPTO_SERVICE_*
 * @algo: The bit number of the algorithm to validate.
 *
 * Validate if the virtio crypto device supports a service and
 * algorithm.
 *
 * Return: true if the device supports the service and algorithm.
 */
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
				  uint32_t service,
				  uint32_t algo)
{
	uint32_t service_mask = 1u << service;
	uint32_t algo_mask = 0;
	bool low = true;

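	/*
	 * Algorithm bits 0-31 are reported in the low 32-bit mask of a
	 * service (e.g. cipher_algo_l); bits 32-63 live in the high mask
	 * (e.g. cipher_algo_h).
	 */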
	if (algo > 31) {
		algo -= 32;
		low = false;
	}

	if (!(vcrypto->crypto_services & service_mask))
		return false;

	switch (service) {
	case VIRTIO_CRYPTO_SERVICE_CIPHER:
		if (low)
			algo_mask = vcrypto->cipher_algo_l;
		else
			algo_mask = vcrypto->cipher_algo_h;
		break;

	case VIRTIO_CRYPTO_SERVICE_HASH:
		algo_mask = vcrypto->hash_algo;
		break;

	case VIRTIO_CRYPTO_SERVICE_MAC:
		if (low)
			algo_mask = vcrypto->mac_algo_l;
		else
			algo_mask = vcrypto->mac_algo_h;
		break;

	case VIRTIO_CRYPTO_SERVICE_AEAD:
		algo_mask = vcrypto->aead_algo;
		break;

	case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
		algo_mask = vcrypto->akcipher_algo;
		break;
	}

	if (!(algo_mask & (1u << algo)))
		return false;

	return true;
}