/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

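/*
 * Free the per-request resources of a completed (or abandoned) request.
 * The IV and the request header may hold sensitive material, so they are
 * zeroed before being freed; the scatterlist array is plain kfree()d.
 */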
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		kzfree(vc_req->iv);
		kzfree(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}

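/*
 * Data virtqueue interrupt callback: drain all completed buffers from the
 * queue, map the device status codes onto errno values and hand each
 * finished cipher request back to the crypto layer.  The per-queue lock
 * is released around the finalize call so request completion runs without
 * holding it.
 */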
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_request *vc_req;
	unsigned long flags;
	unsigned int len;
	struct ablkcipher_request *ablk_req;
	int error;
	unsigned int qid = vq->index;

	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
				switch (vc_req->status) {
				case VIRTIO_CRYPTO_OK:
					error = 0;
					break;
				case VIRTIO_CRYPTO_INVSESS:
				case VIRTIO_CRYPTO_ERR:
					error = -EINVAL;
					break;
				case VIRTIO_CRYPTO_BADMSG:
					error = -EBADMSG;
					break;
				default:
					error = -EIO;
					break;
				}
				ablk_req = vc_req->ablkcipher_req;

				spin_unlock_irqrestore(
					&vcrypto->data_vq[qid].lock, flags);
				/* Finish the encrypt or decrypt process */
				virtio_crypto_ablkcipher_finalize_req(vc_req,
						ablk_req, error);
				spin_lock_irqsave(
					&vcrypto->data_vq[qid].lock, flags);
			}
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}

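/*
 * Allocate the temporary find_vqs() parameter arrays, ask the transport for
 * max_data_queues data virtqueues plus one control virtqueue (the last
 * entry), and attach one crypto engine to each data queue.
 */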
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	struct device *dev = &vi->vdev->dev;

	/*
	 * We expect 1 data virtqueue, followed by
	 * possibly N-1 more data queues used in multiqueue mode,
	 * followed by the control vq.
	 */
	total_vqs = vi->max_data_queues + 1;

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue */
	callbacks[total_vqs - 1] = NULL;
	names[total_vqs - 1] = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
	for (i = 0; i < vi->max_data_queues; i++) {
		callbacks[i] = virtcrypto_dataq_callback;
		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
				"dataq.%d", i);
		names[i] = vi->data_vq[i].name;
	}

	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
	if (ret)
		goto err_find;

	vi->ctrl_vq = vqs[total_vqs - 1];

	for (i = 0; i < vi->max_data_queues; i++) {
		spin_lock_init(&vi->data_vq[i].lock);
		vi->data_vq[i].vq = vqs[i];
		/* Initialize crypto engine */
		vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
		if (!vi->data_vq[i].engine) {
			ret = -ENOMEM;
			goto err_engine;
		}

		vi->data_vq[i].engine->cipher_one_request =
			virtio_crypto_ablkcipher_crypt_req;
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_engine:
	/* Tear down the engines that were already allocated */
	while (--i >= 0)
		if (vi->data_vq[i].engine)
			crypto_engine_exit(vi->data_vq[i].engine);
err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
				GFP_KERNEL);
	if (!vi->data_vq)
		return -ENOMEM;

	return 0;
}

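/*
 * Drop the CPU affinity hints set by virtcrypto_set_affinity().  The hcpu
 * argument is currently unused; all callers pass -1.
 */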
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_data_queues; i++)
			virtqueue_set_affinity(vi->data_vq[i].vq, -1);

		vi->affinity_hint_set = false;
	}
}

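/*
 * Spread the data virtqueues across the online CPUs, one queue per CPU, so
 * that each queue stays private to one CPU and contention is avoided.  With
 * a single data queue the affinity hint is cleared instead.
 */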
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
	int i = 0;
	int cpu;

	/*
	 * In single queue mode, we don't set the cpu affinity.
	 */
	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
		virtcrypto_clean_affinity(vcrypto, -1);
		return;
	}

	/*
	 * In multiqueue mode, we let each queue be private to one CPU
	 * by setting the affinity hint, to eliminate contention.
	 *
	 * TODO: add CPU hotplug support by registering a CPU notifier.
	 */
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
		if (++i >= vcrypto->max_data_queues)
			break;
	}

	vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}

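/*
 * Allocate the per-queue bookkeeping, create the virtqueues and then spread
 * them across the online CPUs.  On failure the queue bookkeeping is freed
 * again before the error is returned.
 */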
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate the data queue bookkeeping */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtcrypto_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtcrypto_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtcrypto_free_queues(vi);
err:
	return ret;
}

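/*
 * Read the status field from the device config space and act on it:
 * unknown bits mark the device as broken, a newly set HW_READY bit starts
 * the device, and a cleared bit stops it again.
 */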
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
				"Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}

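/*
 * Start the crypto engine attached to each data virtqueue.  If one of them
 * fails to start, the engines that were already started are torn down
 * before the error is returned.
 */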
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
	int32_t i;
	int ret;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		if (vcrypto->data_vq[i].engine) {
			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);

	return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
	u32 i;

	for (i = 0; i < vcrypto->max_data_queues; i++)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}

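/*
 * Probe path: validate the transport (VIRTIO_F_VERSION_1 and config space
 * access are mandatory), read the device limits from the config space,
 * register the device with the internal device manager, set up the
 * virtqueues and crypto engines, and finally mark the device ready and act
 * on its initial status.
 */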
static int virtcrypto_probe(struct virtio_device *vdev)
{
	int err = -EFAULT;
	struct virtio_crypto *vcrypto;
	u32 max_data_queues = 0, max_cipher_key_len = 0;
	u32 max_auth_key_len = 0;
	u64 max_size = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
			       dev_to_node(&vdev->dev));
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread(vdev, struct virtio_crypto_config,
		max_cipher_key_len, &max_cipher_key_len);
	virtio_cread(vdev, struct virtio_crypto_config,
		max_auth_key_len, &max_auth_key_len);
	virtio_cread(vdev, struct virtio_crypto_config,
		max_size, &max_size);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
		goto free;
	}
	vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
	vcrypto->vdev = vdev;

	spin_lock_init(&vcrypto->ctrl_lock);

	/* Use single data queue as default */
	vcrypto->curr_queue = 1;
	vcrypto->max_data_queues = max_data_queues;
	vcrypto->max_cipher_key_len = max_cipher_key_len;
	vcrypto->max_auth_key_len = max_auth_key_len;
	vcrypto->max_size = max_size;

	dev_info(&vdev->dev,
		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
		vcrypto->max_data_queues,
		vcrypto->max_cipher_key_len,
		vcrypto->max_auth_key_len,
		vcrypto->max_size);

	err = virtcrypto_init_vqs(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
		goto free_dev;
	}

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_update_status(vcrypto);
	if (err)
		goto free_engines;

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	vcrypto->vdev->config->reset(vdev);
	virtcrypto_del_vqs(vcrypto);
free_dev:
	virtcrypto_devmgr_rm_dev(vcrypto);
free:
	kfree(vcrypto);
	return err;
}

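/*
 * Detach and free any requests still sitting in the data virtqueues.
 * Callers reset the device first, so no buffer can be in flight anymore.
 */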
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
			kfree(vc_req->req_data);
			kfree(vc_req->sgs);
		}
	}
}

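/*
 * Remove path: stop the device if it is running, reset it, release any
 * pending requests, tear down the engines and virtqueues, and drop the
 * device from the device manager.
 */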
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtcrypto_update_status(vcrypto);
}

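/*
 * Suspend/resume support: freeze quiesces the device and tears down the
 * virtqueues and engines; restore rebuilds them and starts the device
 * again, mirroring the relevant parts of probe and remove.
 */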
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	vcrypto->vdev->config->reset(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
#endif

static unsigned int features[] = {
	/* none */
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtcrypto_probe,
	.remove = virtcrypto_remove,
	.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtcrypto_freeze,
	.restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");