Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/crypto/chelsio/chtls/chtls_main.c at v5.2 (601 lines, 14 kB)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_register(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_unregister(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
{
        struct chtls_listen *clisten;
        int ret = NOTIFY_DONE;

        clisten = (struct chtls_listen *)data;

        switch (event) {
        case CHTLS_LISTEN_START:
                ret = chtls_listen_start(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        case CHTLS_LISTEN_STOP:
                chtls_listen_stop(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        }
        return ret;
}

static struct notifier_block listen_notifier = {
        .notifier_call = listen_notify_handler
};

static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb_transport_header(skb) != skb_network_header(skb)))
                return tcp_v4_do_rcv(sk, skb);
        BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
        return 0;
}

static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;
        int err;

        if (sk->sk_protocol != IPPROTO_TCP)
                return -EPROTONOSUPPORT;

        if (sk->sk_family == PF_INET &&
            LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
                return -EADDRNOTAVAIL;

        sk->sk_backlog_rcv = listen_backlog_rcv;
        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return -ENOMEM;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        err = raw_notifier_call_chain(&listen_notify_list,
                                      CHTLS_LISTEN_START, clisten);
        mutex_unlock(&notify_mutex);
        return err;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;

        if (sk->sk_protocol != IPPROTO_TCP)
                return;

        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
                                CHTLS_LISTEN_STOP, clisten);
        mutex_unlock(&notify_mutex);
}
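/*
 * A listen-start/stop request is a heap-allocated struct chtls_listen
 * pushed through the notifier chain above.  listen_notify_handler()
 * takes ownership and kfree()s it once chtls_listen_start() or
 * chtls_listen_stop() has run, so the requesting side never frees it.
 */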
static int chtls_inline_feature(struct tls_device *dev)
{
        struct net_device *netdev;
        struct chtls_dev *cdev;
        int i;

        cdev = to_chtls_dev(dev);

        for (i = 0; i < cdev->lldi->nports; i++) {
                netdev = cdev->ports[i];
                if (netdev->features & NETIF_F_HW_TLS_RECORD)
                        return 1;
        }
        return 0;
}

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                return chtls_start_listen(cdev, sk);
        return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                chtls_stop_listen(cdev, sk);
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
        int i;

        tls_unregister_device(&cdev->tlsdev);
        kvfree(cdev->kmap.addr);
        idr_destroy(&cdev->hwtid_idr);
        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
                kfree_skb(cdev->rspq_skb_cache[i]);
        kfree(cdev->lldi);
        kfree_skb(cdev->askb);
        kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
        struct chtls_dev *cdev;
        struct tls_device *dev;

        dev = container_of(kref, struct tls_device, kref);
        cdev = to_chtls_dev(dev);
        chtls_free_uld(cdev);
}

static void chtls_register_dev(struct chtls_dev *cdev)
{
        struct tls_device *tlsdev = &cdev->tlsdev;

        strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
        strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
                TLS_DEVICE_NAME_MAX);
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tlsdev->release = chtls_dev_release;
        kref_init(&tlsdev->kref);
        tls_register_device(tlsdev);
        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
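/*
 * chtls_register_dev() publishes the adapter to the TLS core as
 * "chtls<port0-name>".  The feature/hash/unhash hooks let net/tls probe
 * for NETIF_F_HW_TLS_RECORD and mirror listen sockets into the driver;
 * the last kref_put() on the tls_device ends in chtls_free_uld().
 */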
static void process_deferq(struct work_struct *task_param)
{
        struct chtls_dev *cdev = container_of(task_param,
                                              struct chtls_dev, deferq_task);
        struct sk_buff *skb;

        spin_lock_bh(&cdev->deferq.lock);
        while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
                spin_unlock_bh(&cdev->deferq.lock);
                DEFERRED_SKB_CB(skb)->handler(cdev, skb);
                spin_lock_bh(&cdev->deferq.lock);
        }
        spin_unlock_bh(&cdev->deferq.lock);
}

static int chtls_get_skb(struct chtls_dev *cdev)
{
        cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
        if (!cdev->askb)
                return -ENOMEM;

        skb_put(cdev->askb, sizeof(struct tcphdr));
        skb_reset_transport_header(cdev->askb);
        memset(cdev->askb->data, 0, cdev->askb->len);
        return 0;
}

static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
        struct cxgb4_lld_info *lldi;
        struct chtls_dev *cdev;
        int i, j;

        cdev = kzalloc(sizeof(*cdev) + info->nports *
                       (sizeof(struct net_device *)), GFP_KERNEL);
        if (!cdev)
                goto out;

        lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
        if (!lldi)
                goto out_lldi;

        if (chtls_get_skb(cdev))
                goto out_skb;

        *lldi = *info;
        cdev->lldi = lldi;
        cdev->pdev = lldi->pdev;
        cdev->tids = lldi->tids;
        cdev->ports = lldi->ports;
        cdev->mtus = lldi->mtus;
        cdev->tids = lldi->tids;
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;

        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
                unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

                cdev->rspq_skb_cache[i] = __alloc_skb(size,
                                                      gfp_any(), 0,
                                                      lldi->nodeid);
                if (unlikely(!cdev->rspq_skb_cache[i]))
                        goto out_rspq_skb;
        }

        idr_init(&cdev->hwtid_idr);
        INIT_WORK(&cdev->deferq_task, process_deferq);
        spin_lock_init(&cdev->listen_lock);
        spin_lock_init(&cdev->idr_lock);
        cdev->send_page_order = min_t(uint, get_order(32768),
                                      send_page_order);
        cdev->max_host_sndbuf = 48 * 1024;

        if (lldi->vr->key.size)
                if (chtls_init_kmap(cdev, lldi))
                        goto out_rspq_skb;

        mutex_lock(&cdev_mutex);
        list_add_tail(&cdev->list, &cdev_list);
        mutex_unlock(&cdev_mutex);

        return cdev;
out_rspq_skb:
        for (j = 0; j < i; j++)
                kfree_skb(cdev->rspq_skb_cache[j]);
        kfree_skb(cdev->askb);
out_skb:
        kfree(lldi);
out_lldi:
        kfree(cdev);
out:
        return NULL;
}

static void chtls_free_all_uld(void)
{
        struct chtls_dev *cdev, *tmp;

        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
                if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
                        list_del(&cdev->list);
                        kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                }
        }
        mutex_unlock(&cdev_mutex);
}
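/*
 * chtls_uld_add() pre-allocates one response skb per RSPQ hash bucket,
 * sized to the fixed CPL response length also used by chtls_recv_rsp()
 * below.  The receive path can then reuse a cached skb whenever it is
 * the sole owner, avoiding an atomic allocation in the common case.
 */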
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct chtls_dev *cdev = handle;

        switch (new_state) {
        case CXGB4_STATE_UP:
                chtls_register_dev(cdev);
                break;
        case CXGB4_STATE_DOWN:
                break;
        case CXGB4_STATE_START_RECOVERY:
                break;
        case CXGB4_STATE_DETACH:
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
                kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                break;
        default:
                break;
        }
        return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
                                          const __be64 *rsp,
                                          u32 pktshift)
{
        struct sk_buff *skb;

        /* Allocate space for cpl_pass_accept_req which will be synthesized by
         * driver. Once driver synthesizes cpl_pass_accept_req the skb will go
         * through the regular cpl_pass_accept_req processing in TOM.
         */
        skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
                        - pktshift, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
        __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
                  - pktshift);
        /* For now we will copy cpl_rx_pkt in the skb */
        skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
        skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
                                       gl->va + pktshift,
                                       gl->tot_len - pktshift);

        return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
                             const struct pkt_gl *gl, const __be64 *rsp)
{
        unsigned int opcode = *(u8 *)rsp;
        struct sk_buff *skb;
        int ret;

        skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
        if (!skb)
                return -ENOMEM;

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);

        return 0;
}

static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
        unsigned long rspq_bin;
        unsigned int opcode;
        struct sk_buff *skb;
        unsigned int len;
        int ret;

        len = 64 - sizeof(struct rsp_ctrl) - 8;
        opcode = *(u8 *)rsp;

        rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
        skb = cdev->rspq_skb_cache[rspq_bin];
        if (skb && !skb_is_nonlinear(skb) &&
            !skb_shared(skb) && !skb_cloned(skb)) {
                refcount_inc(&skb->users);
                if (refcount_read(&skb->users) == 2) {
                        __skb_trim(skb, 0);
                        if (skb_tailroom(skb) >= len)
                                goto copy_out;
                }
                refcount_dec(&skb->users);
        }
        skb = alloc_skb(len, GFP_ATOMIC);
        if (unlikely(!skb))
                return -ENOMEM;

copy_out:
        __skb_put(skb, len);
        skb_copy_to_linear_data(skb, rsp, len);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        ret = chtls_handlers[opcode](cdev, skb);

        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
        return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
                       struct sk_buff **skbs, const __be64 *rsp)
{
        struct sk_buff *skb = *skbs;
        unsigned int opcode;
        int ret;

        opcode = *(u8 *)rsp;

        __skb_push(skb, sizeof(struct rss_header));
        skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
}

static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
                                const struct pkt_gl *gl)
{
        struct chtls_dev *cdev = handle;
        unsigned int opcode;
        struct sk_buff *skb;

        opcode = *(u8 *)rsp;

        if (unlikely(opcode == CPL_RX_PKT)) {
                if (chtls_recv_packet(cdev, gl, rsp) < 0)
                        goto nomem;
                return 0;
        }

        if (!gl)
                return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
        skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
        if (unlikely(!skb))
                goto nomem;
        chtls_recv(cdev, &skb, rsp);
        return 0;

nomem:
        return -ENOMEM;
}

static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
                               int __user *optlen)
{
        struct tls_crypto_info crypto_info = { 0 };

        crypto_info.version = TLS_1_2_VERSION;
        if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
                return -EFAULT;
        return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->getsockopt(sk, level, optname, optval, optlen);

        return do_chtls_getsockopt(sk, optval, optlen);
}

static int do_chtls_setsockopt(struct sock *sk, int optname,
                               char __user *optval, unsigned int optlen)
{
        struct tls_crypto_info *crypto_info, tmp_crypto_info;
        struct chtls_sock *csk;
        int keylen;
        int rc = 0;

        csk = rcu_dereference_sk_user_data(sk);

        if (!optval || optlen < sizeof(*crypto_info)) {
                rc = -EINVAL;
                goto out;
        }

        rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        /* check version */
        if (tmp_crypto_info.version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto out;
        }

        crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

        switch (tmp_crypto_info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                /* Obtain version and type from previous copy */
                crypto_info[0] = tmp_crypto_info;
                /* Now copy the following data */
                rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
                                    optval + sizeof(*crypto_info),
                                    sizeof(struct tls12_crypto_info_aes_gcm_128)
                                    - sizeof(*crypto_info));

                if (rc) {
                        rc = -EFAULT;
                        goto out;
                }

                keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
                rc = chtls_setkey(csk, keylen, optname);
                break;
        }
        default:
                rc = -EINVAL;
                goto out;
        }
out:
        return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->setsockopt(sk, level, optname, optval, optlen);

        return do_chtls_setsockopt(sk, optname, optval, optlen);
}
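/*
 * Only SOL_TLS is intercepted by the two wrappers above; every other
 * level is forwarded to the handlers saved in the tls_context, so
 * ordinary TCP socket options keep working on an offloaded socket.
 */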
static struct cxgb4_uld_info chtls_uld_info = {
        .name = DRV_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chtls_uld_add,
        .state_change = chtls_uld_state_change,
        .rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
        sk->sk_prot = &chtls_cpl_prot;
}

static void __init chtls_init_ulp_ops(void)
{
        chtls_cpl_prot = tcp_prot;
        chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
                           &tcp_prot, PF_INET);
        chtls_cpl_prot.close = chtls_close;
        chtls_cpl_prot.disconnect = chtls_disconnect;
        chtls_cpl_prot.destroy = chtls_destroy_sock;
        chtls_cpl_prot.shutdown = chtls_shutdown;
        chtls_cpl_prot.sendmsg = chtls_sendmsg;
        chtls_cpl_prot.sendpage = chtls_sendpage;
        chtls_cpl_prot.recvmsg = chtls_recvmsg;
        chtls_cpl_prot.setsockopt = chtls_setsockopt;
        chtls_cpl_prot.getsockopt = chtls_getsockopt;
}

static int __init chtls_register(void)
{
        chtls_init_ulp_ops();
        register_listen_notifier(&listen_notifier);
        cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
        return 0;
}

static void __exit chtls_unregister(void)
{
        unregister_listen_notifier(&listen_notifier);
        chtls_free_all_uld();
        cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
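For context, here is a minimal userspace sketch of the sequence that drives the setsockopt path above: attach the "tls" ULP to a connected TCP socket, then hand the session keys to the kernel with setsockopt(SOL_TLS, TLS_TX). The helper name enable_ktls_tx() and the assumption that a TLS 1.2 AES-GCM-128 handshake has already produced key/iv/salt/rec_seq are illustrative only; on a socket served by a Chelsio port advertising NETIF_F_HW_TLS_RECORD this call can reach chtls_setsockopt(), while on other hardware the same calls fall back to the generic kTLS path.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31              /* from include/uapi/linux/tcp.h */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282             /* from include/linux/socket.h */
#endif

/* Hypothetical helper: 'fd' is a connected TCP socket; key material
 * comes from a TLS 1.2 AES-GCM-128 handshake done in userspace. */
static int enable_ktls_tx(int fd, const unsigned char *key,
                          const unsigned char *iv,
                          const unsigned char *salt,
                          const unsigned char *rec_seq)
{
        struct tls12_crypto_info_aes_gcm_128 ci;

        /* Attach the kernel TLS ULP; this swaps the socket's proto ops. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
                return -1;

        memset(&ci, 0, sizeof(ci));
        ci.info.version = TLS_1_2_VERSION;
        ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
        memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
        memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
        memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

        /* Hand the keys to the kernel; subsequent send()s emit TLS records. */
        return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}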