
Merge branch 'nfp-tls-add-basic-TX-offload'

Jakub Kicinski says:

====================
nfp: tls: add basic TX offload

This series adds initial TLS offload support to the nfp driver.
Only TX side is added for now. We need minor adjustments to
the core tls code:
- expose the per-skb fallback helper;
- grow the driver context slightly;
- add a helper to get to the driver state more easily.
We only support TX offload for now, and only if all packets
keep coming in order. For retransmissions we use the
aforementioned software fallback, and in case of local drops
we completely give up on the given TCP stream.

This will obviously be improved soon; this patch set is the
minimal, functional, yet easily reviewable chunk.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
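
For orientation, the per-skb decision this series adds to the driver TX
path (nfp_net_tls_tx() in nfp_net_common.c below) boils down to the
following condensed sketch; my_tls_ctx and my_tls_tx are illustrative
names, the real context layout lives in crypto/crypto.h and must fit the
grown TLS_DRIVER_STATE_SIZE_TX:

    #include <linux/tcp.h>
    #include <net/tls.h>

    /* Illustrative stand-in for nfp_net_tls_offload_ctx; must fit in
     * TLS_DRIVER_STATE_SIZE_TX (the real code BUILD_BUG_ON()s this).
     */
    struct my_tls_ctx {
    	__be32 fw_handle[2];	/* opaque connection handle from FW */
    	u32 next_seq;		/* next TCP seq expected on TX */
    };

    static struct sk_buff *my_tls_tx(struct sk_buff *skb)
    {
    	struct my_tls_ctx *ctx;

    	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
    		return skb;			/* not offloaded */

    	ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
    	if (ntohl(tcp_hdr(skb)->seq) != ctx->next_seq)
    		return tls_encrypt_skb(skb);	/* out of order: SW fallback */

    	/* in order: HW encrypts using fw_handle; advance by payload len */
    	ctx->next_seq += skb->len - (skb_transport_offset(skb) +
    				     tcp_hdrlen(skb));
    	return skb;
    }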

+1421 -47
+1
drivers/net/ethernet/netronome/Kconfig
··· 20 20 tristate "Netronome(R) NFP4000/NFP6000 NIC driver" 21 21 depends on PCI && PCI_MSI 22 22 depends on VXLAN || VXLAN=n 23 + depends on TLS && TLS_DEVICE || TLS_DEVICE=n 23 24 select NET_DEVLINK 24 25 ---help--- 25 26 This driver supports the Netronome(R) NFP4000/NFP6000 based
+6
drivers/net/ethernet/netronome/nfp/Makefile
··· 16 16 nfpcore/nfp_rtsym.o \ 17 17 nfpcore/nfp_target.o \ 18 18 ccm.o \ 19 + ccm_mbox.o \ 19 20 nfp_asm.o \ 20 21 nfp_app.o \ 21 22 nfp_app_nic.o \ ··· 34 33 nfp_port.o \ 35 34 nfp_shared_buf.o \ 36 35 nic/main.o 36 + 37 + ifeq ($(CONFIG_TLS_DEVICE),y) 38 + nfp-objs += \ 39 + crypto/tls.o 40 + endif 37 41 38 42 ifeq ($(CONFIG_NFP_APP_FLOWER),y) 39 43 nfp-objs += \
-3
drivers/net/ethernet/netronome/nfp/ccm.c
··· 7 7 #include "nfp_app.h" 8 8 #include "nfp_net.h" 9 9 10 - #define NFP_CCM_TYPE_REPLY_BIT 7 11 - #define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req)) 12 - 13 10 #define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg) 14 11 15 12 #define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
+43 -5
drivers/net/ethernet/netronome/nfp/ccm.h
··· 9 9 #include <linux/wait.h> 10 10 11 11 struct nfp_app; 12 + struct nfp_net; 12 13 13 14 /* Firmware ABI */ 14 15 ··· 22 21 NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6, 23 22 NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7, 24 23 NFP_CCM_TYPE_BPF_BPF_EVENT = 8, 24 + NFP_CCM_TYPE_CRYPTO_RESET = 9, 25 + NFP_CCM_TYPE_CRYPTO_ADD = 10, 26 + NFP_CCM_TYPE_CRYPTO_DEL = 11, 27 + NFP_CCM_TYPE_CRYPTO_UPDATE = 12, 25 28 __NFP_CCM_TYPE_MAX, 26 29 }; 27 30 28 31 #define NFP_CCM_ABI_VERSION 1 29 32 33 + #define NFP_CCM_TYPE_REPLY_BIT 7 34 + #define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req)) 35 + 30 36 struct nfp_ccm_hdr { 31 - u8 type; 32 - u8 ver; 33 - __be16 tag; 37 + union { 38 + struct { 39 + u8 type; 40 + u8 ver; 41 + __be16 tag; 42 + }; 43 + __be32 raw; 44 + }; 34 45 }; 35 46 36 47 static inline u8 nfp_ccm_get_type(struct sk_buff *skb) ··· 54 41 return hdr->type; 55 42 } 56 43 57 - static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb) 44 + static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb) 58 45 { 59 46 struct nfp_ccm_hdr *hdr; 60 47 61 48 hdr = (struct nfp_ccm_hdr *)skb->data; 62 49 63 - return be16_to_cpu(hdr->tag); 50 + return hdr->tag; 64 51 } 52 + 53 + static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb) 54 + { 55 + return be16_to_cpu(__nfp_ccm_get_tag(skb)); 56 + } 57 + 58 + #define NFP_NET_MBOX_TLV_TYPE GENMASK(31, 16) 59 + #define NFP_NET_MBOX_TLV_LEN GENMASK(15, 0) 60 + 61 + enum nfp_ccm_mbox_tlv_type { 62 + NFP_NET_MBOX_TLV_TYPE_UNKNOWN = 0, 63 + NFP_NET_MBOX_TLV_TYPE_END = 1, 64 + NFP_NET_MBOX_TLV_TYPE_MSG = 2, 65 + NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP = 3, 66 + NFP_NET_MBOX_TLV_TYPE_RESV = 4, 67 + }; 65 68 66 69 /* Implementation */ 67 70 ··· 109 80 struct sk_buff * 110 81 nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb, 111 82 enum nfp_ccm_type type, unsigned int reply_size); 83 + 84 + bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size); 85 + struct sk_buff * 86 + nfp_ccm_mbox_alloc(struct nfp_net *nn, unsigned int req_size, 87 + unsigned int reply_size, gfp_t flags); 88 + int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb, 89 + enum nfp_ccm_type type, 90 + unsigned int reply_size, 91 + unsigned int max_reply_size); 112 92 #endif
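
The new mailbox TLVs are framed by a single 32-bit header word, with the
type in the high half and the length in the low half, per the masks just
added. A minimal sketch of encoding and decoding such a header (the
encode side mirrors nfp_ccm_mbox_write_tlv() in ccm_mbox.c below;
function names are illustrative):

    #include <linux/bitfield.h>

    static u32 mbox_tlv_hdr(u32 type, u32 len)
    {
    	return FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
    	       FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len);
    }

    static void mbox_tlv_decode(u32 hdr, u32 *type, u32 *len)
    {
    	*type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, hdr);
    	*len = FIELD_GET(NFP_NET_MBOX_TLV_LEN, hdr);
    }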
+591
drivers/net/ethernet/netronome/nfp/ccm_mbox.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* Copyright (C) 2019 Netronome Systems, Inc. */ 3 + 4 + #include <linux/bitfield.h> 5 + #include <linux/io.h> 6 + #include <linux/skbuff.h> 7 + 8 + #include "ccm.h" 9 + #include "nfp_net.h" 10 + 11 + /* CCM messages via the mailbox. CMSGs get wrapped into simple TLVs 12 + * and copied into the mailbox. Multiple messages can be copied to 13 + * form a batch. Threads come in with CMSG formed in an skb, then 14 + * enqueue that skb onto the request queue. If threads skb is first 15 + * in queue this thread will handle the mailbox operation. It copies 16 + * up to 16 messages into the mailbox (making sure that both requests 17 + * and replies will fit. After FW is done processing the batch it 18 + * copies the data out and wakes waiting threads. 19 + * If a thread is waiting it either gets its the message completed 20 + * (response is copied into the same skb as the request, overwriting 21 + * it), or becomes the first in queue. 22 + * Completions and next-to-run are signaled via the control buffer 23 + * to limit potential cache line bounces. 24 + */ 25 + 26 + #define NFP_CCM_MBOX_BATCH_LIMIT 16 27 + #define NFP_CCM_TIMEOUT (NFP_NET_POLL_TIMEOUT * 1000) 28 + #define NFP_CCM_MAX_QLEN 256 29 + 30 + enum nfp_net_mbox_cmsg_state { 31 + NFP_NET_MBOX_CMSG_STATE_QUEUED, 32 + NFP_NET_MBOX_CMSG_STATE_NEXT, 33 + NFP_NET_MBOX_CMSG_STATE_BUSY, 34 + NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND, 35 + NFP_NET_MBOX_CMSG_STATE_DONE, 36 + }; 37 + 38 + /** 39 + * struct nfp_ccm_mbox_skb_cb - CCM mailbox specific info 40 + * @state: processing state (/stage) of the message 41 + * @err: error encountered during processing if any 42 + * @max_len: max(request_len, reply_len) 43 + * @exp_reply: expected reply length (0 means don't validate) 44 + */ 45 + struct nfp_ccm_mbox_cmsg_cb { 46 + enum nfp_net_mbox_cmsg_state state; 47 + int err; 48 + unsigned int max_len; 49 + unsigned int exp_reply; 50 + }; 51 + 52 + static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn) 53 + { 54 + return round_down(nn->tlv_caps.mbox_len, 4) - 55 + NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */ 56 + 4 * 2; /* Msg TLV plus End TLV headers */ 57 + } 58 + 59 + static void 60 + nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len) 61 + { 62 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 63 + 64 + cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED; 65 + cb->err = 0; 66 + cb->max_len = max_len; 67 + cb->exp_reply = exp_reply; 68 + } 69 + 70 + static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb) 71 + { 72 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 73 + 74 + return cb->max_len; 75 + } 76 + 77 + static bool nfp_ccm_mbox_done(struct sk_buff *skb) 78 + { 79 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 80 + 81 + return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE; 82 + } 83 + 84 + static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb) 85 + { 86 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 87 + 88 + return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED && 89 + cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT; 90 + } 91 + 92 + static void nfp_ccm_mbox_set_busy(struct sk_buff *skb) 93 + { 94 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 95 + 96 + cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY; 97 + } 98 + 99 + static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb) 100 + { 101 + return skb_queue_is_first(&nn->mbox_cmsg.queue, skb); 102 + } 103 + 104 + static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct 
sk_buff *skb) 105 + { 106 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 107 + 108 + return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT; 109 + } 110 + 111 + static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn) 112 + { 113 + struct nfp_ccm_mbox_cmsg_cb *cb; 114 + struct sk_buff *skb; 115 + 116 + skb = skb_peek(&nn->mbox_cmsg.queue); 117 + if (!skb) 118 + return; 119 + 120 + cb = (void *)skb->cb; 121 + cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT; 122 + } 123 + 124 + static void 125 + nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len) 126 + { 127 + nn_writel(nn, off, 128 + FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) | 129 + FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len)); 130 + } 131 + 132 + static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last) 133 + { 134 + struct sk_buff *skb; 135 + int reserve, i, cnt; 136 + __be32 *data; 137 + u32 off, len; 138 + 139 + off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL; 140 + skb = __skb_peek(&nn->mbox_cmsg.queue); 141 + while (true) { 142 + nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG, 143 + skb->len); 144 + off += 4; 145 + 146 + /* Write data word by word, skb->data should be aligned */ 147 + data = (__be32 *)skb->data; 148 + cnt = skb->len / 4; 149 + for (i = 0 ; i < cnt; i++) { 150 + nn_writel(nn, off, be32_to_cpu(data[i])); 151 + off += 4; 152 + } 153 + if (skb->len & 3) { 154 + __be32 tmp = 0; 155 + 156 + memcpy(&tmp, &data[i], skb->len & 3); 157 + nn_writel(nn, off, be32_to_cpu(tmp)); 158 + off += 4; 159 + } 160 + 161 + /* Reserve space if reply is bigger */ 162 + len = round_up(skb->len, 4); 163 + reserve = nfp_ccm_mbox_maxlen(skb) - len; 164 + if (reserve > 0) { 165 + nfp_ccm_mbox_write_tlv(nn, off, 166 + NFP_NET_MBOX_TLV_TYPE_RESV, 167 + reserve); 168 + off += 4 + reserve; 169 + } 170 + 171 + if (skb == last) 172 + break; 173 + skb = skb_queue_next(&nn->mbox_cmsg.queue, skb); 174 + } 175 + 176 + nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0); 177 + } 178 + 179 + static struct sk_buff * 180 + nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last) 181 + { 182 + struct sk_buff *skb; 183 + 184 + skb = __skb_peek(&nn->mbox_cmsg.queue); 185 + while (true) { 186 + if (__nfp_ccm_get_tag(skb) == tag) 187 + return skb; 188 + 189 + if (skb == last) 190 + return NULL; 191 + skb = skb_queue_next(&nn->mbox_cmsg.queue, skb); 192 + } 193 + } 194 + 195 + static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last) 196 + { 197 + struct nfp_ccm_mbox_cmsg_cb *cb; 198 + u8 __iomem *data, *end; 199 + struct sk_buff *skb; 200 + 201 + data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off + 202 + NFP_NET_CFG_MBOX_SIMPLE_VAL; 203 + end = data + nn->tlv_caps.mbox_len; 204 + 205 + while (true) { 206 + unsigned int length, offset, type; 207 + struct nfp_ccm_hdr hdr; 208 + __be32 *skb_data; 209 + u32 tlv_hdr; 210 + int i, cnt; 211 + 212 + tlv_hdr = readl(data); 213 + type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr); 214 + length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr); 215 + offset = data - nn->dp.ctrl_bar; 216 + 217 + /* Advance past the header */ 218 + data += 4; 219 + 220 + if (data + length > end) { 221 + nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n", 222 + type, offset, length); 223 + break; 224 + } 225 + 226 + if (type == NFP_NET_MBOX_TLV_TYPE_END) 227 + break; 228 + if (type == NFP_NET_MBOX_TLV_TYPE_RESV) 229 + goto next_tlv; 230 + if (type != NFP_NET_MBOX_TLV_TYPE_MSG && 231 + type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) { 232 + 
nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n", 233 + type, offset, length); 234 + break; 235 + } 236 + 237 + if (length < 4) { 238 + nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n", 239 + type, offset, length); 240 + break; 241 + } 242 + 243 + hdr.raw = cpu_to_be32(readl(data)); 244 + 245 + skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last); 246 + if (!skb) { 247 + nn_dp_warn(&nn->dp, "mailbox request not found:%u\n", 248 + be16_to_cpu(hdr.tag)); 249 + break; 250 + } 251 + cb = (void *)skb->cb; 252 + 253 + if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) { 254 + nn_dp_warn(&nn->dp, 255 + "mailbox msg not supported type:%d\n", 256 + nfp_ccm_get_type(skb)); 257 + cb->err = -EIO; 258 + goto next_tlv; 259 + } 260 + 261 + if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) { 262 + nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n", 263 + hdr.type, 264 + __NFP_CCM_REPLY(nfp_ccm_get_type(skb))); 265 + cb->err = -EIO; 266 + goto next_tlv; 267 + } 268 + if (cb->exp_reply && length != cb->exp_reply) { 269 + nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n", 270 + hdr.type, length, cb->exp_reply); 271 + cb->err = -EIO; 272 + goto next_tlv; 273 + } 274 + if (length > cb->max_len) { 275 + nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n", 276 + hdr.type, cb->max_len, length); 277 + cb->err = -EIO; 278 + goto next_tlv; 279 + } 280 + 281 + if (length <= skb->len) 282 + __skb_trim(skb, length); 283 + else 284 + skb_put(skb, length - skb->len); 285 + 286 + /* We overcopy here slightly, but that's okay, the skb is large 287 + * enough, and the garbage will be ignored (beyond skb->len). 288 + */ 289 + skb_data = (__be32 *)skb->data; 290 + memcpy(skb_data, &hdr, 4); 291 + 292 + cnt = DIV_ROUND_UP(length, 4); 293 + for (i = 1 ; i < cnt; i++) 294 + skb_data[i] = cpu_to_be32(readl(data + i * 4)); 295 + 296 + cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND; 297 + next_tlv: 298 + data += round_up(length, 4); 299 + if (data + 4 > end) { 300 + nn_dp_warn(&nn->dp, 301 + "reached end of MBOX without END TLV\n"); 302 + break; 303 + } 304 + } 305 + 306 + smp_wmb(); /* order the skb->data vs. cb->state */ 307 + spin_lock_bh(&nn->mbox_cmsg.queue.lock); 308 + do { 309 + skb = __skb_dequeue(&nn->mbox_cmsg.queue); 310 + cb = (void *)skb->cb; 311 + 312 + if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) { 313 + cb->err = -ENOENT; 314 + smp_wmb(); /* order the cb->err vs. cb->state */ 315 + } 316 + cb->state = NFP_NET_MBOX_CMSG_STATE_DONE; 317 + } while (skb != last); 318 + 319 + nfp_ccm_mbox_mark_next_runner(nn); 320 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 321 + } 322 + 323 + static void 324 + nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err) 325 + { 326 + struct nfp_ccm_mbox_cmsg_cb *cb; 327 + struct sk_buff *skb; 328 + 329 + spin_lock_bh(&nn->mbox_cmsg.queue.lock); 330 + do { 331 + skb = __skb_dequeue(&nn->mbox_cmsg.queue); 332 + cb = (void *)skb->cb; 333 + 334 + cb->err = err; 335 + smp_wmb(); /* order the cb->err vs. 
cb->state */ 336 + cb->state = NFP_NET_MBOX_CMSG_STATE_DONE; 337 + } while (skb != last); 338 + 339 + nfp_ccm_mbox_mark_next_runner(nn); 340 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 341 + } 342 + 343 + static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn) 344 + __releases(&nn->mbox_cmsg.queue.lock) 345 + { 346 + int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL; 347 + struct sk_buff *skb, *last; 348 + int cnt, err; 349 + 350 + space -= 4; /* for End TLV */ 351 + 352 + /* First skb must fit, because it's ours and we checked it fits */ 353 + cnt = 1; 354 + last = skb = __skb_peek(&nn->mbox_cmsg.queue); 355 + space -= 4 + nfp_ccm_mbox_maxlen(skb); 356 + 357 + while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) { 358 + skb = skb_queue_next(&nn->mbox_cmsg.queue, last); 359 + space -= 4 + nfp_ccm_mbox_maxlen(skb); 360 + if (space < 0) 361 + break; 362 + last = skb; 363 + nfp_ccm_mbox_set_busy(skb); 364 + cnt++; 365 + if (cnt == NFP_CCM_MBOX_BATCH_LIMIT) 366 + break; 367 + } 368 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 369 + 370 + /* Now we own all skb's marked in progress, new requests may arrive 371 + * at the end of the queue. 372 + */ 373 + 374 + nn_ctrl_bar_lock(nn); 375 + 376 + nfp_ccm_mbox_copy_in(nn, last); 377 + 378 + err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG); 379 + if (!err) 380 + nfp_ccm_mbox_copy_out(nn, last); 381 + else 382 + nfp_ccm_mbox_mark_all_err(nn, last, -EIO); 383 + 384 + nn_ctrl_bar_unlock(nn); 385 + 386 + wake_up_all(&nn->mbox_cmsg.wq); 387 + } 388 + 389 + static int nfp_ccm_mbox_skb_return(struct sk_buff *skb) 390 + { 391 + struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb; 392 + 393 + if (cb->err) 394 + dev_kfree_skb_any(skb); 395 + return cb->err; 396 + } 397 + 398 + /* If wait timed out but the command is already in progress we have 399 + * to wait until it finishes. Runners has ownership of the skbs marked 400 + * as busy. 401 + */ 402 + static int 403 + nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb, 404 + enum nfp_ccm_type type) 405 + __releases(&nn->mbox_cmsg.queue.lock) 406 + { 407 + bool was_first; 408 + 409 + if (nfp_ccm_mbox_in_progress(skb)) { 410 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 411 + 412 + wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb)); 413 + smp_rmb(); /* pairs with smp_wmb() after data is written */ 414 + return nfp_ccm_mbox_skb_return(skb); 415 + } 416 + 417 + was_first = nfp_ccm_mbox_should_run(nn, skb); 418 + __skb_unlink(skb, &nn->mbox_cmsg.queue); 419 + if (was_first) 420 + nfp_ccm_mbox_mark_next_runner(nn); 421 + 422 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 423 + 424 + if (was_first) 425 + wake_up_all(&nn->mbox_cmsg.wq); 426 + 427 + nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n", 428 + type); 429 + return -ETIMEDOUT; 430 + } 431 + 432 + static int 433 + nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb, 434 + enum nfp_ccm_type type, 435 + unsigned int reply_size, unsigned int max_reply_size, 436 + gfp_t flags) 437 + { 438 + const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn); 439 + unsigned int max_len; 440 + ssize_t undersize; 441 + int err; 442 + 443 + if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) { 444 + nn_dp_warn(&nn->dp, 445 + "message type %d not supported by mailbox\n", type); 446 + return -EINVAL; 447 + } 448 + 449 + /* If the reply size is unknown assume it will take the entire 450 + * mailbox, the callers should do their best for this to never 451 + * happen. 
452 + */ 453 + if (!max_reply_size) 454 + max_reply_size = mbox_max; 455 + max_reply_size = round_up(max_reply_size, 4); 456 + 457 + /* Make sure we can fit the entire reply into the skb, 458 + * and that we don't have to slow down the mbox handler 459 + * with allocations. 460 + */ 461 + undersize = max_reply_size - (skb_end_pointer(skb) - skb->data); 462 + if (undersize > 0) { 463 + err = pskb_expand_head(skb, 0, undersize, flags); 464 + if (err) { 465 + nn_dp_warn(&nn->dp, 466 + "can't allocate reply buffer for mailbox\n"); 467 + return err; 468 + } 469 + } 470 + 471 + /* Make sure that request and response both fit into the mailbox */ 472 + max_len = max(max_reply_size, round_up(skb->len, 4)); 473 + if (max_len > mbox_max) { 474 + nn_dp_warn(&nn->dp, 475 + "message too big for tha mailbox: %u/%u vs %u\n", 476 + skb->len, max_reply_size, mbox_max); 477 + return -EMSGSIZE; 478 + } 479 + 480 + nfp_ccm_mbox_msg_init(skb, reply_size, max_len); 481 + 482 + return 0; 483 + } 484 + 485 + static int 486 + nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb, 487 + enum nfp_ccm_type type) 488 + { 489 + struct nfp_ccm_hdr *hdr; 490 + 491 + assert_spin_locked(&nn->mbox_cmsg.queue.lock); 492 + 493 + if (nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) { 494 + nn_dp_warn(&nn->dp, "mailbox request queue too long\n"); 495 + return -EBUSY; 496 + } 497 + 498 + hdr = (void *)skb->data; 499 + hdr->ver = NFP_CCM_ABI_VERSION; 500 + hdr->type = type; 501 + hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++); 502 + 503 + __skb_queue_tail(&nn->mbox_cmsg.queue, skb); 504 + 505 + return 0; 506 + } 507 + 508 + int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb, 509 + enum nfp_ccm_type type, 510 + unsigned int reply_size, 511 + unsigned int max_reply_size) 512 + { 513 + int err; 514 + 515 + err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size, 516 + max_reply_size, GFP_KERNEL); 517 + if (err) 518 + goto err_free_skb; 519 + 520 + spin_lock_bh(&nn->mbox_cmsg.queue.lock); 521 + 522 + err = nfp_ccm_mbox_msg_enqueue(nn, skb, type); 523 + if (err) 524 + goto err_unlock; 525 + 526 + /* First in queue takes the mailbox lock and processes the batch */ 527 + if (!nfp_ccm_mbox_is_first(nn, skb)) { 528 + bool to; 529 + 530 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 531 + 532 + to = !wait_event_timeout(nn->mbox_cmsg.wq, 533 + nfp_ccm_mbox_done(skb) || 534 + nfp_ccm_mbox_should_run(nn, skb), 535 + msecs_to_jiffies(NFP_CCM_TIMEOUT)); 536 + 537 + /* fast path for those completed by another thread */ 538 + if (nfp_ccm_mbox_done(skb)) { 539 + smp_rmb(); /* pairs with wmb after data is written */ 540 + return nfp_ccm_mbox_skb_return(skb); 541 + } 542 + 543 + spin_lock_bh(&nn->mbox_cmsg.queue.lock); 544 + 545 + if (!nfp_ccm_mbox_is_first(nn, skb)) { 546 + WARN_ON(!to); 547 + 548 + err = nfp_ccm_mbox_unlink_unlock(nn, skb, type); 549 + if (err) 550 + goto err_free_skb; 551 + return 0; 552 + } 553 + } 554 + 555 + /* run queue expects the lock held */ 556 + nfp_ccm_mbox_run_queue_unlock(nn); 557 + return nfp_ccm_mbox_skb_return(skb); 558 + 559 + err_unlock: 560 + spin_unlock_bh(&nn->mbox_cmsg.queue.lock); 561 + err_free_skb: 562 + dev_kfree_skb_any(skb); 563 + return err; 564 + } 565 + 566 + struct sk_buff * 567 + nfp_ccm_mbox_alloc(struct nfp_net *nn, unsigned int req_size, 568 + unsigned int reply_size, gfp_t flags) 569 + { 570 + unsigned int max_size; 571 + struct sk_buff *skb; 572 + 573 + if (!reply_size) 574 + max_size = nfp_ccm_mbox_max_msg(nn); 575 + else 576 + max_size = max(req_size, reply_size); 577 + 
max_size = round_up(max_size, 4); 578 + 579 + skb = alloc_skb(max_size, flags); 580 + if (!skb) 581 + return NULL; 582 + 583 + skb_put(skb, req_size); 584 + 585 + return skb; 586 + } 587 + 588 + bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size) 589 + { 590 + return nfp_ccm_mbox_max_msg(nn) >= size; 591 + }
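
From a caller's point of view the new mailbox interface is a synchronous
round trip: allocate an skb sized for both request and reply, fill in the
request, and communicate; the reply overwrites the request in the same
skb. A sketch of the pattern, following nfp_net_tls_reset() and
nfp_net_tls_communicate_simple() in crypto/tls.c below (my_crypto_reset
is an illustrative name; assumes the usual ccm.h and crypto/fw.h
includes):

    static int my_crypto_reset(struct nfp_net *nn)
    {
    	struct nfp_crypto_reply_simple *reply;
    	struct nfp_crypto_req_reset *req;
    	struct sk_buff *skb;
    	int err;

    	/* sized for max(request, reply) so no reallocation is needed */
    	skb = nfp_ccm_mbox_alloc(nn, sizeof(*req), sizeof(*reply),
    				 GFP_KERNEL);
    	if (!skb)
    		return -ENOMEM;

    	req = (void *)skb->data;
    	req->ep_id = 0;	/* the CCM header is filled in on enqueue */

    	/* enqueue, batch into the mailbox, wait for FW;
    	 * the skb is freed internally on error
    	 */
    	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_RESET,
    				       sizeof(*reply), sizeof(*reply));
    	if (err)
    		return err;

    	reply = (void *)skb->data;
    	err = -be32_to_cpu(reply->error);
    	dev_consume_skb_any(skb);
    	return err;
    }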
+23
drivers/net/ethernet/netronome/nfp/crypto/crypto.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* Copyright (C) 2019 Netronome Systems, Inc. */ 3 + 4 + #ifndef NFP_CRYPTO_H 5 + #define NFP_CRYPTO_H 1 6 + 7 + struct nfp_net_tls_offload_ctx { 8 + __be32 fw_handle[2]; 9 + 10 + u32 next_seq; 11 + bool out_of_sync; 12 + }; 13 + 14 + #ifdef CONFIG_TLS_DEVICE 15 + int nfp_net_tls_init(struct nfp_net *nn); 16 + #else 17 + static inline int nfp_net_tls_init(struct nfp_net *nn) 18 + { 19 + return 0; 20 + } 21 + #endif 22 + 23 + #endif
+82
drivers/net/ethernet/netronome/nfp/crypto/fw.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* Copyright (C) 2019 Netronome Systems, Inc. */ 3 + 4 + #ifndef NFP_CRYPTO_FW_H 5 + #define NFP_CRYPTO_FW_H 1 6 + 7 + #include "../ccm.h" 8 + 9 + #define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0 10 + #define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1 11 + 12 + struct nfp_crypto_reply_simple { 13 + struct nfp_ccm_hdr hdr; 14 + __be32 error; 15 + }; 16 + 17 + struct nfp_crypto_req_reset { 18 + struct nfp_ccm_hdr hdr; 19 + __be32 ep_id; 20 + }; 21 + 22 + #define NFP_NET_TLS_IPVER GENMASK(15, 12) 23 + #define NFP_NET_TLS_VLAN GENMASK(11, 0) 24 + #define NFP_NET_TLS_VLAN_UNUSED 4095 25 + 26 + struct nfp_crypto_req_add_front { 27 + struct nfp_ccm_hdr hdr; 28 + __be32 ep_id; 29 + u8 resv[3]; 30 + u8 opcode; 31 + u8 key_len; 32 + __be16 ipver_vlan __packed; 33 + u8 l4_proto; 34 + }; 35 + 36 + struct nfp_crypto_req_add_back { 37 + __be16 src_port; 38 + __be16 dst_port; 39 + __be32 key[8]; 40 + __be32 salt; 41 + __be32 iv[2]; 42 + __be32 counter; 43 + __be32 rec_no[2]; 44 + __be32 tcp_seq; 45 + }; 46 + 47 + struct nfp_crypto_req_add_v4 { 48 + struct nfp_crypto_req_add_front front; 49 + __be32 src_ip; 50 + __be32 dst_ip; 51 + struct nfp_crypto_req_add_back back; 52 + }; 53 + 54 + struct nfp_crypto_req_add_v6 { 55 + struct nfp_crypto_req_add_front front; 56 + __be32 src_ip[4]; 57 + __be32 dst_ip[4]; 58 + struct nfp_crypto_req_add_back back; 59 + }; 60 + 61 + struct nfp_crypto_reply_add { 62 + struct nfp_ccm_hdr hdr; 63 + __be32 error; 64 + __be32 handle[2]; 65 + }; 66 + 67 + struct nfp_crypto_req_del { 68 + struct nfp_ccm_hdr hdr; 69 + __be32 ep_id; 70 + __be32 handle[2]; 71 + }; 72 + 73 + struct nfp_crypto_req_update { 74 + struct nfp_ccm_hdr hdr; 75 + __be32 ep_id; 76 + u8 resv[3]; 77 + u8 opcode; 78 + __be32 handle[2]; 79 + __be32 rec_no[2]; 80 + __be32 tcp_seq; 81 + }; 82 + #endif
+429
drivers/net/ethernet/netronome/nfp/crypto/tls.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* Copyright (C) 2019 Netronome Systems, Inc. */ 3 + 4 + #include <linux/bitfield.h> 5 + #include <linux/ipv6.h> 6 + #include <linux/skbuff.h> 7 + #include <net/tls.h> 8 + 9 + #include "../ccm.h" 10 + #include "../nfp_net.h" 11 + #include "crypto.h" 12 + #include "fw.h" 13 + 14 + #define NFP_NET_TLS_CCM_MBOX_OPS_MASK \ 15 + (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \ 16 + BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \ 17 + BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \ 18 + BIT(NFP_CCM_TYPE_CRYPTO_UPDATE)) 19 + 20 + #define NFP_NET_TLS_OPCODE_MASK_RX \ 21 + BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC) 22 + 23 + #define NFP_NET_TLS_OPCODE_MASK_TX \ 24 + BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC) 25 + 26 + #define NFP_NET_TLS_OPCODE_MASK \ 27 + (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX) 28 + 29 + static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on) 30 + { 31 + u32 off, val; 32 + 33 + off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4); 34 + 35 + val = nn_readl(nn, off); 36 + if (on) 37 + val |= BIT(opcode & 31); 38 + else 39 + val &= ~BIT(opcode & 31); 40 + nn_writel(nn, off, val); 41 + } 42 + 43 + static bool 44 + __nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add, 45 + enum tls_offload_ctx_dir direction) 46 + { 47 + u8 opcode; 48 + int cnt; 49 + 50 + opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC; 51 + nn->ktls_tx_conn_cnt += add; 52 + cnt = nn->ktls_tx_conn_cnt; 53 + nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt; 54 + 55 + /* Care only about 0 -> 1 and 1 -> 0 transitions */ 56 + if (cnt > 1) 57 + return false; 58 + 59 + nfp_net_crypto_set_op(nn, opcode, cnt); 60 + return true; 61 + } 62 + 63 + static int 64 + nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add, 65 + enum tls_offload_ctx_dir direction) 66 + { 67 + int ret = 0; 68 + 69 + /* Use the BAR lock to protect the connection counts */ 70 + nn_ctrl_bar_lock(nn); 71 + if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) { 72 + ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO); 73 + /* Undo the cnt adjustment if failed */ 74 + if (ret) 75 + __nfp_net_tls_conn_cnt_changed(nn, -add, direction); 76 + } 77 + nn_ctrl_bar_unlock(nn); 78 + 79 + return ret; 80 + } 81 + 82 + static int 83 + nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction) 84 + { 85 + return nfp_net_tls_conn_cnt_changed(nn, 1, direction); 86 + } 87 + 88 + static int 89 + nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction) 90 + { 91 + return nfp_net_tls_conn_cnt_changed(nn, -1, direction); 92 + } 93 + 94 + static struct sk_buff * 95 + nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags) 96 + { 97 + return nfp_ccm_mbox_alloc(nn, req_sz, 98 + sizeof(struct nfp_crypto_reply_simple), 99 + flags); 100 + } 101 + 102 + static int 103 + nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb, 104 + const char *name, enum nfp_ccm_type type) 105 + { 106 + struct nfp_crypto_reply_simple *reply; 107 + int err; 108 + 109 + err = nfp_ccm_mbox_communicate(nn, skb, type, 110 + sizeof(*reply), sizeof(*reply)); 111 + if (err) { 112 + nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err); 113 + return err; 114 + } 115 + 116 + reply = (void *)skb->data; 117 + err = -be32_to_cpu(reply->error); 118 + if (err) 119 + nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n", 120 + name, err); 121 + dev_consume_skb_any(skb); 122 + 123 + return err; 124 + } 125 + 126 + static void nfp_net_tls_del_fw(struct 
nfp_net *nn, __be32 *fw_handle) 127 + { 128 + struct nfp_crypto_req_del *req; 129 + struct sk_buff *skb; 130 + 131 + skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL); 132 + if (!skb) 133 + return; 134 + 135 + req = (void *)skb->data; 136 + req->ep_id = 0; 137 + memcpy(req->handle, fw_handle, sizeof(req->handle)); 138 + 139 + nfp_net_tls_communicate_simple(nn, skb, "delete", 140 + NFP_CCM_TYPE_CRYPTO_DEL); 141 + } 142 + 143 + static struct nfp_crypto_req_add_back * 144 + nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk, 145 + int direction) 146 + { 147 + struct inet_sock *inet = inet_sk(sk); 148 + 149 + req->front.key_len += sizeof(__be32) * 2; 150 + req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) | 151 + FIELD_PREP(NFP_NET_TLS_VLAN, 152 + NFP_NET_TLS_VLAN_UNUSED)); 153 + 154 + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { 155 + req->src_ip = inet->inet_saddr; 156 + req->dst_ip = inet->inet_daddr; 157 + } else { 158 + req->src_ip = inet->inet_daddr; 159 + req->dst_ip = inet->inet_saddr; 160 + } 161 + 162 + return &req->back; 163 + } 164 + 165 + static struct nfp_crypto_req_add_back * 166 + nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk, 167 + int direction) 168 + { 169 + #if IS_ENABLED(CONFIG_IPV6) 170 + struct ipv6_pinfo *np = inet6_sk(sk); 171 + 172 + req->front.key_len += sizeof(struct in6_addr) * 2; 173 + req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 6) | 174 + FIELD_PREP(NFP_NET_TLS_VLAN, 175 + NFP_NET_TLS_VLAN_UNUSED)); 176 + 177 + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { 178 + memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip)); 179 + memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip)); 180 + } else { 181 + memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip)); 182 + memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip)); 183 + } 184 + 185 + #endif 186 + return &req->back; 187 + } 188 + 189 + static void 190 + nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front, 191 + struct nfp_crypto_req_add_back *back, struct sock *sk, 192 + int direction) 193 + { 194 + struct inet_sock *inet = inet_sk(sk); 195 + 196 + front->l4_proto = IPPROTO_TCP; 197 + 198 + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { 199 + back->src_port = inet->inet_sport; 200 + back->dst_port = inet->inet_dport; 201 + } else { 202 + back->src_port = inet->inet_dport; 203 + back->dst_port = inet->inet_sport; 204 + } 205 + } 206 + 207 + static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction) 208 + { 209 + switch (direction) { 210 + case TLS_OFFLOAD_CTX_DIR_TX: 211 + return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC; 212 + case TLS_OFFLOAD_CTX_DIR_RX: 213 + return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC; 214 + default: 215 + WARN_ON_ONCE(1); 216 + return 0; 217 + } 218 + } 219 + 220 + static bool 221 + nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type, 222 + enum tls_offload_ctx_dir direction) 223 + { 224 + u8 bit; 225 + 226 + switch (cipher_type) { 227 + case TLS_CIPHER_AES_GCM_128: 228 + if (direction == TLS_OFFLOAD_CTX_DIR_TX) 229 + bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC; 230 + else 231 + return false; 232 + break; 233 + default: 234 + return false; 235 + } 236 + 237 + return nn->tlv_caps.crypto_ops & BIT(bit); 238 + } 239 + 240 + static int 241 + nfp_net_tls_add(struct net_device *netdev, struct sock *sk, 242 + enum tls_offload_ctx_dir direction, 243 + struct tls_crypto_info *crypto_info, 244 + u32 start_offload_tcp_sn) 245 + { 246 + struct tls12_crypto_info_aes_gcm_128 
*tls_ci; 247 + struct nfp_net *nn = netdev_priv(netdev); 248 + struct nfp_crypto_req_add_front *front; 249 + struct nfp_net_tls_offload_ctx *ntls; 250 + struct nfp_crypto_req_add_back *back; 251 + struct nfp_crypto_reply_add *reply; 252 + struct sk_buff *skb; 253 + size_t req_sz; 254 + bool ipv6; 255 + int err; 256 + 257 + BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) > 258 + TLS_DRIVER_STATE_SIZE_TX); 259 + 260 + if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction)) 261 + return -EOPNOTSUPP; 262 + 263 + switch (sk->sk_family) { 264 + #if IS_ENABLED(CONFIG_IPV6) 265 + case AF_INET6: 266 + if (sk->sk_ipv6only || 267 + ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { 268 + req_sz = sizeof(struct nfp_crypto_req_add_v6); 269 + ipv6 = true; 270 + break; 271 + } 272 + #endif 273 + /* fall through */ 274 + case AF_INET: 275 + req_sz = sizeof(struct nfp_crypto_req_add_v4); 276 + ipv6 = false; 277 + break; 278 + default: 279 + return -EOPNOTSUPP; 280 + } 281 + 282 + err = nfp_net_tls_conn_add(nn, direction); 283 + if (err) 284 + return err; 285 + 286 + skb = nfp_ccm_mbox_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL); 287 + if (!skb) { 288 + err = -ENOMEM; 289 + goto err_conn_remove; 290 + } 291 + 292 + front = (void *)skb->data; 293 + front->ep_id = 0; 294 + front->key_len = 8; 295 + front->opcode = nfp_tls_1_2_dir_to_opcode(direction); 296 + memset(front->resv, 0, sizeof(front->resv)); 297 + 298 + if (ipv6) 299 + back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction); 300 + else 301 + back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction); 302 + 303 + nfp_net_tls_set_l4(front, back, sk, direction); 304 + 305 + back->counter = 0; 306 + back->tcp_seq = cpu_to_be32(start_offload_tcp_sn); 307 + 308 + tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 309 + memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); 310 + memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0, 311 + sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE); 312 + memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE); 313 + memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 314 + memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq)); 315 + 316 + err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD, 317 + sizeof(*reply), sizeof(*reply)); 318 + if (err) { 319 + nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err); 320 + /* communicate frees skb on error */ 321 + goto err_conn_remove; 322 + } 323 + 324 + reply = (void *)skb->data; 325 + err = -be32_to_cpu(reply->error); 326 + if (err) { 327 + if (err == -ENOSPC) { 328 + if (!atomic_fetch_inc(&nn->ktls_no_space)) 329 + nn_info(nn, "HW TLS table full\n"); 330 + } else { 331 + nn_dp_warn(&nn->dp, 332 + "failed to add TLS, FW replied: %d\n", err); 333 + } 334 + goto err_free_skb; 335 + } 336 + 337 + if (!reply->handle[0] && !reply->handle[1]) { 338 + nn_dp_warn(&nn->dp, "FW returned NULL handle\n"); 339 + goto err_fw_remove; 340 + } 341 + 342 + ntls = tls_driver_ctx(sk, direction); 343 + memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle)); 344 + ntls->next_seq = start_offload_tcp_sn; 345 + dev_consume_skb_any(skb); 346 + 347 + return 0; 348 + 349 + err_fw_remove: 350 + nfp_net_tls_del_fw(nn, reply->handle); 351 + err_free_skb: 352 + dev_consume_skb_any(skb); 353 + err_conn_remove: 354 + nfp_net_tls_conn_remove(nn, direction); 355 + return err; 356 + } 357 + 358 + static void 359 + nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx, 360 + enum 
tls_offload_ctx_dir direction) 361 + { 362 + struct nfp_net *nn = netdev_priv(netdev); 363 + struct nfp_net_tls_offload_ctx *ntls; 364 + 365 + nfp_net_tls_conn_remove(nn, direction); 366 + 367 + ntls = __tls_driver_ctx(tls_ctx, direction); 368 + nfp_net_tls_del_fw(nn, ntls->fw_handle); 369 + } 370 + 371 + static const struct tlsdev_ops nfp_net_tls_ops = { 372 + .tls_dev_add = nfp_net_tls_add, 373 + .tls_dev_del = nfp_net_tls_del, 374 + }; 375 + 376 + static int nfp_net_tls_reset(struct nfp_net *nn) 377 + { 378 + struct nfp_crypto_req_reset *req; 379 + struct sk_buff *skb; 380 + 381 + skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL); 382 + if (!skb) 383 + return -ENOMEM; 384 + 385 + req = (void *)skb->data; 386 + req->ep_id = 0; 387 + 388 + return nfp_net_tls_communicate_simple(nn, skb, "reset", 389 + NFP_CCM_TYPE_CRYPTO_RESET); 390 + } 391 + 392 + int nfp_net_tls_init(struct nfp_net *nn) 393 + { 394 + struct net_device *netdev = nn->dp.netdev; 395 + int err; 396 + 397 + if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK)) 398 + return 0; 399 + 400 + if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) != 401 + NFP_NET_TLS_CCM_MBOX_OPS_MASK) 402 + return 0; 403 + 404 + if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) { 405 + nn_warn(nn, "disabling TLS offload - mbox too small: %d\n", 406 + nn->tlv_caps.mbox_len); 407 + return 0; 408 + } 409 + 410 + err = nfp_net_tls_reset(nn); 411 + if (err) 412 + return err; 413 + 414 + nn_ctrl_bar_lock(nn); 415 + nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0); 416 + err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO); 417 + nn_ctrl_bar_unlock(nn); 418 + if (err) 419 + return err; 420 + 421 + if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) { 422 + netdev->hw_features |= NETIF_F_HW_TLS_TX; 423 + netdev->features |= NETIF_F_HW_TLS_TX; 424 + } 425 + 426 + netdev->tlsdev_ops = &nfp_net_tls_ops; 427 + 428 + return 0; 429 + }
+41 -7
drivers/net/ethernet/netronome/nfp/nfp_net.h
··· 12 12 #ifndef _NFP_NET_H_ 13 13 #define _NFP_NET_H_ 14 14 15 + #include <linux/atomic.h> 15 16 #include <linux/interrupt.h> 16 17 #include <linux/list.h> 17 18 #include <linux/netdevice.h> 18 19 #include <linux/pci.h> 19 20 #include <linux/io-64-nonatomic-hi-lo.h> 21 + #include <linux/semaphore.h> 20 22 #include <net/xdp.h> 21 23 22 24 #include "nfp_net_ctrl.h" ··· 374 372 * @hw_csum_tx_inner: Counter of inner TX checksum offload requests 375 373 * @tx_gather: Counter of packets with Gather DMA 376 374 * @tx_lso: Counter of LSO packets sent 375 + * @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW 376 + * @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted 377 + * by the fallback path because packets came out of order 378 + * @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback 379 + * path could not encrypt them 377 380 * @tx_errors: How many TX errors were encountered 378 381 * @tx_busy: How often was TX busy (no space)? 379 382 * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures ··· 416 409 u64 hw_csum_rx_inner_ok; 417 410 u64 hw_csum_rx_complete; 418 411 412 + u64 hw_csum_rx_error; 413 + u64 rx_replace_buf_alloc_fail; 414 + 419 415 struct nfp_net_tx_ring *xdp_ring; 420 416 421 417 struct u64_stats_sync tx_sync; 422 418 u64 tx_pkts; 423 419 u64 tx_bytes; 424 - u64 hw_csum_tx; 420 + 421 + u64 ____cacheline_aligned_in_smp hw_csum_tx; 425 422 u64 hw_csum_tx_inner; 426 423 u64 tx_gather; 427 424 u64 tx_lso; 425 + u64 hw_tls_tx; 428 426 429 - u64 hw_csum_rx_error; 430 - u64 rx_replace_buf_alloc_fail; 427 + u64 tls_tx_fallback; 428 + u64 tls_tx_no_fallback; 431 429 u64 tx_errors; 432 430 u64 tx_busy; 431 + 432 + /* Cold data follows */ 433 433 434 434 u32 irq_vector; 435 435 irq_handler_t handler; ··· 472 458 * @netdev: Backpointer to net_device structure 473 459 * @is_vf: Is the driver attached to a VF? 474 460 * @chained_metadata_format: Firemware will use new metadata format 461 + * @ktls_tx: Is kTLS TX enabled? 475 462 * @rx_dma_dir: Mapping direction for RX buffers 476 463 * @rx_dma_off: Offset at which DMA packets (for XDP headroom) 477 464 * @rx_offset: Offset in the RX buffers where packet data starts ··· 497 482 498 483 u8 is_vf:1; 499 484 u8 chained_metadata_format:1; 485 + u8 ktls_tx:1; 500 486 501 487 u8 rx_dma_dir; 502 488 u8 rx_offset; ··· 565 549 * @reconfig_timer: Timer for async reading of reconfig results 566 550 * @reconfig_in_progress_update: Update FW is processing now (debug only) 567 551 * @bar_lock: vNIC config BAR access lock, protects: update, 568 - * mailbox area 552 + * mailbox area, crypto TLV 569 553 * @link_up: Is the link up? 
570 554 * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading 571 555 * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter ··· 578 562 * @tx_bar: Pointer to mapped TX queues 579 563 * @rx_bar: Pointer to mapped FL/RX queues 580 564 * @tlv_caps: Parsed TLV capabilities 565 + * @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections 566 + * @ktls_no_space: Counter of firmware rejecting kTLS connection due to 567 + * lack of space 568 + * @mbox_cmsg: Common Control Message via vNIC mailbox state 569 + * @mbox_cmsg.queue: CCM mbox queue of pending messages 570 + * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes 571 + * @mbox_cmsg.tag: CCM mbox message tag allocator 581 572 * @debugfs_dir: Device directory in debugfs 582 573 * @vnic_list: Entry on device vNIC list 583 574 * @pdev: Backpointer to PCI device ··· 643 620 struct timer_list reconfig_timer; 644 621 u32 reconfig_in_progress_update; 645 622 646 - struct mutex bar_lock; 623 + struct semaphore bar_lock; 647 624 648 625 u32 rx_coalesce_usecs; 649 626 u32 rx_coalesce_max_frames; ··· 659 636 u8 __iomem *rx_bar; 660 637 661 638 struct nfp_net_tlv_caps tlv_caps; 639 + 640 + unsigned int ktls_tx_conn_cnt; 641 + 642 + atomic_t ktls_no_space; 643 + 644 + struct { 645 + struct sk_buff_head queue; 646 + wait_queue_head_t wq; 647 + u16 tag; 648 + } mbox_cmsg; 662 649 663 650 struct dentry *debugfs_dir; 664 651 ··· 881 848 882 849 static inline void nn_ctrl_bar_lock(struct nfp_net *nn) 883 850 { 884 - mutex_lock(&nn->bar_lock); 851 + down(&nn->bar_lock); 885 852 } 886 853 887 854 static inline void nn_ctrl_bar_unlock(struct nfp_net *nn) 888 855 { 889 - mutex_unlock(&nn->bar_lock); 856 + up(&nn->bar_lock); 890 857 } 891 858 892 859 /* Globals */ ··· 916 883 917 884 void nfp_net_set_ethtool_ops(struct net_device *netdev); 918 885 void nfp_net_info(struct nfp_net *nn); 886 + int __nfp_net_reconfig(struct nfp_net *nn, u32 update); 919 887 int nfp_net_reconfig(struct nfp_net *nn, u32 update); 920 888 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn); 921 889 void nfp_net_rss_write_itbl(struct nfp_net *nn);
+124 -23
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 23 23 #include <linux/interrupt.h> 24 24 #include <linux/ip.h> 25 25 #include <linux/ipv6.h> 26 - #include <linux/lockdep.h> 27 26 #include <linux/mm.h> 28 27 #include <linux/overflow.h> 29 28 #include <linux/page_ref.h> ··· 36 37 #include <linux/vmalloc.h> 37 38 #include <linux/ktime.h> 38 39 40 + #include <net/tls.h> 39 41 #include <net/vxlan.h> 40 42 41 43 #include "nfpcore/nfp_nsp.h" ··· 45 45 #include "nfp_net.h" 46 46 #include "nfp_net_sriov.h" 47 47 #include "nfp_port.h" 48 + #include "crypto/crypto.h" 48 49 49 50 /** 50 51 * nfp_net_get_fw_version() - Read and parse the FW version ··· 272 271 * 273 272 * Return: Negative errno on error, 0 on success 274 273 */ 275 - static int __nfp_net_reconfig(struct nfp_net *nn, u32 update) 274 + int __nfp_net_reconfig(struct nfp_net *nn, u32 update) 276 275 { 277 276 int ret; 278 - 279 - lockdep_assert_held(&nn->bar_lock); 280 277 281 278 nfp_net_reconfig_sync_enter(nn); 282 279 ··· 330 331 u32 mbox = nn->tlv_caps.mbox_off; 331 332 int ret; 332 333 333 - lockdep_assert_held(&nn->bar_lock); 334 334 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd); 335 335 336 336 ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX); ··· 802 804 u64_stats_update_end(&r_vec->tx_sync); 803 805 } 804 806 807 + #ifdef CONFIG_TLS_DEVICE 808 + static struct sk_buff * 809 + nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, 810 + struct sk_buff *skb, u64 *tls_handle, int *nr_frags) 811 + { 812 + struct nfp_net_tls_offload_ctx *ntls; 813 + struct sk_buff *nskb; 814 + u32 datalen, seq; 815 + 816 + if (likely(!dp->ktls_tx)) 817 + return skb; 818 + if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)) 819 + return skb; 820 + 821 + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); 822 + seq = ntohl(tcp_hdr(skb)->seq); 823 + ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX); 824 + if (unlikely(ntls->next_seq != seq || ntls->out_of_sync)) { 825 + /* Pure ACK out of order already */ 826 + if (!datalen) 827 + return skb; 828 + 829 + u64_stats_update_begin(&r_vec->tx_sync); 830 + r_vec->tls_tx_fallback++; 831 + u64_stats_update_end(&r_vec->tx_sync); 832 + 833 + nskb = tls_encrypt_skb(skb); 834 + if (!nskb) { 835 + u64_stats_update_begin(&r_vec->tx_sync); 836 + r_vec->tls_tx_no_fallback++; 837 + u64_stats_update_end(&r_vec->tx_sync); 838 + return NULL; 839 + } 840 + /* encryption wasn't necessary */ 841 + if (nskb == skb) 842 + return skb; 843 + /* we don't re-check ring space */ 844 + if (unlikely(skb_is_nonlinear(nskb))) { 845 + nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n"); 846 + u64_stats_update_begin(&r_vec->tx_sync); 847 + r_vec->tx_errors++; 848 + u64_stats_update_end(&r_vec->tx_sync); 849 + dev_kfree_skb_any(nskb); 850 + return NULL; 851 + } 852 + 853 + /* jump forward, a TX may have gotten lost, need to sync TX */ 854 + if (!ntls->out_of_sync && seq - ntls->next_seq < U32_MAX / 4) 855 + ntls->out_of_sync = true; 856 + 857 + *nr_frags = 0; 858 + return nskb; 859 + } 860 + 861 + if (datalen) { 862 + u64_stats_update_begin(&r_vec->tx_sync); 863 + r_vec->hw_tls_tx++; 864 + u64_stats_update_end(&r_vec->tx_sync); 865 + } 866 + 867 + memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle)); 868 + ntls->next_seq += datalen; 869 + return skb; 870 + } 871 + #endif 872 + 805 873 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring) 806 874 { 807 875 wmb(); ··· 875 811 tx_ring->wr_ptr_add = 0; 876 812 } 877 813 878 - static int nfp_net_prep_port_id(struct sk_buff *skb) 814 + static 
int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle) 879 815 { 880 816 struct metadata_dst *md_dst = skb_metadata_dst(skb); 881 817 unsigned char *data; 818 + u32 meta_id = 0; 819 + int md_bytes; 882 820 883 - if (likely(!md_dst)) 821 + if (likely(!md_dst && !tls_handle)) 884 822 return 0; 885 - if (unlikely(md_dst->type != METADATA_HW_PORT_MUX)) 886 - return 0; 823 + if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) { 824 + if (!tls_handle) 825 + return 0; 826 + md_dst = NULL; 827 + } 887 828 888 - if (unlikely(skb_cow_head(skb, 8))) 829 + md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8; 830 + 831 + if (unlikely(skb_cow_head(skb, md_bytes))) 889 832 return -ENOMEM; 890 833 891 - data = skb_push(skb, 8); 892 - put_unaligned_be32(NFP_NET_META_PORTID, data); 893 - put_unaligned_be32(md_dst->u.port_info.port_id, data + 4); 834 + meta_id = 0; 835 + data = skb_push(skb, md_bytes) + md_bytes; 836 + if (md_dst) { 837 + data -= 4; 838 + put_unaligned_be32(md_dst->u.port_info.port_id, data); 839 + meta_id = NFP_NET_META_PORTID; 840 + } 841 + if (tls_handle) { 842 + /* conn handle is opaque, we just use u64 to be able to quickly 843 + * compare it to zero 844 + */ 845 + data -= 8; 846 + memcpy(data, &tls_handle, sizeof(tls_handle)); 847 + meta_id <<= NFP_NET_META_FIELD_SIZE; 848 + meta_id |= NFP_NET_META_CONN_HANDLE; 849 + } 894 850 895 - return 8; 851 + data -= 4; 852 + put_unaligned_be32(meta_id, data); 853 + 854 + return md_bytes; 896 855 } 897 856 898 857 /** ··· 938 851 struct nfp_net_dp *dp; 939 852 dma_addr_t dma_addr; 940 853 unsigned int fsize; 854 + u64 tls_handle = 0; 941 855 u16 qidx; 942 856 943 857 dp = &nn->dp; ··· 960 872 return NETDEV_TX_BUSY; 961 873 } 962 874 963 - md_bytes = nfp_net_prep_port_id(skb); 964 - if (unlikely(md_bytes < 0)) { 875 + #ifdef CONFIG_TLS_DEVICE 876 + skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags); 877 + if (unlikely(!skb)) { 965 878 nfp_net_tx_xmit_more_flush(tx_ring); 966 - dev_kfree_skb_any(skb); 967 879 return NETDEV_TX_OK; 968 880 } 881 + #endif 882 + 883 + md_bytes = nfp_net_prep_tx_meta(skb, tls_handle); 884 + if (unlikely(md_bytes < 0)) 885 + goto err_flush; 969 886 970 887 /* Start with the head skbuf */ 971 888 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), 972 889 DMA_TO_DEVICE); 973 890 if (dma_mapping_error(dp->dev, dma_addr)) 974 - goto err_free; 891 + goto err_dma_err; 975 892 976 893 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); 977 894 ··· 1072 979 tx_ring->txbufs[wr_idx].skb = NULL; 1073 980 tx_ring->txbufs[wr_idx].dma_addr = 0; 1074 981 tx_ring->txbufs[wr_idx].fidx = -2; 1075 - err_free: 982 + err_dma_err: 1076 983 nn_dp_warn(dp, "Failed to map DMA TX buffer\n"); 984 + err_flush: 1077 985 nfp_net_tx_xmit_more_flush(tx_ring); 1078 986 u64_stats_update_begin(&r_vec->tx_sync); 1079 987 r_vec->tx_errors++; ··· 3798 3704 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; 3799 3705 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; 3800 3706 3801 - mutex_init(&nn->bar_lock); 3707 + sema_init(&nn->bar_lock, 1); 3802 3708 3803 3709 spin_lock_init(&nn->reconfig_lock); 3804 3710 spin_lock_init(&nn->link_status_lock); 3805 3711 3806 3712 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); 3713 + 3714 + skb_queue_head_init(&nn->mbox_cmsg.queue); 3715 + init_waitqueue_head(&nn->mbox_cmsg.wq); 3807 3716 3808 3717 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, 3809 3718 &nn->tlv_caps); ··· 3830 3733 void nfp_net_free(struct nfp_net *nn) 3831 3734 { 3832 3735 WARN_ON(timer_pending(&nn->reconfig_timer) 
|| nn->reconfig_posted); 3833 - 3834 - mutex_destroy(&nn->bar_lock); 3736 + WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue)); 3835 3737 3836 3738 if (nn->dp.netdev) 3837 3739 free_netdev(nn->dp.netdev); ··· 4105 4009 if (err) 4106 4010 return err; 4107 4011 4108 - if (nn->dp.netdev) 4012 + if (nn->dp.netdev) { 4109 4013 nfp_net_netdev_init(nn); 4014 + 4015 + err = nfp_net_tls_init(nn); 4016 + if (err) 4017 + return err; 4018 + } 4110 4019 4111 4020 nfp_net_vecs_init(nn); 4112 4021
+15
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
··· 99 99 100 100 caps->repr_cap = readl(data); 101 101 break; 102 + case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES: 103 + if (length >= 4) 104 + caps->mbox_cmsg_types = readl(data); 105 + break; 106 + case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS: 107 + if (length < 32) { 108 + dev_err(dev, 109 + "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n", 110 + length, offset); 111 + return -EINVAL; 112 + } 113 + 114 + caps->crypto_ops = readl(data); 115 + caps->crypto_enable_off = data - ctrl_mem + 16; 116 + break; 102 117 default: 103 118 if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) 104 119 break;
+21
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
··· 44 44 #define NFP_NET_META_MARK 2 45 45 #define NFP_NET_META_PORTID 5 46 46 #define NFP_NET_META_CSUM 6 /* checksum complete type */ 47 + #define NFP_NET_META_CONN_HANDLE 7 47 48 48 49 #define NFP_META_PORT_ID_CTRL ~0U 49 50 ··· 136 135 #define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ 137 136 #define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */ 138 137 #define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */ 138 + #define NFP_NET_CFG_UPDATE_CRYPTO (0x1 << 14) /* Crypto on/off */ 139 139 #define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ 140 140 #define NFP_NET_CFG_TXRS_ENABLE 0x0008 141 141 #define NFP_NET_CFG_RXRS_ENABLE 0x0010 ··· 396 394 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2 397 395 398 396 #define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5 397 + #define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6 399 398 400 399 /** 401 400 * VLAN filtering using general use mailbox ··· 469 466 * %NFP_NET_CFG_TLV_TYPE_REPR_CAP: 470 467 * Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which 471 468 * can be used on representors. 469 + * 470 + * %NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES: 471 + * Variable, bitmap of control message types supported by the mailbox handler. 472 + * Bit 0 corresponds to message type 0, bit 1 to 1, etc. Control messages are 473 + * encapsulated into simple TLVs, with an end TLV and written to the Mailbox. 474 + * 475 + * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS: 476 + * 8 words, bitmaps of supported and enabled crypto operations. 477 + * First 16B (4 words) contains a bitmap of supported crypto operations, 478 + * and next 16B contain the enabled operations. 472 479 */ 473 480 #define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 474 481 #define NFP_NET_CFG_TLV_TYPE_RESERVED 1 ··· 488 475 #define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5 489 476 #define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6 490 477 #define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7 478 + #define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10 479 + #define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */ 491 480 492 481 struct device; 493 482 ··· 499 484 * @mbox_off: vNIC mailbox area offset 500 485 * @mbox_len: vNIC mailbox area length 501 486 * @repr_cap: capabilities for representors 487 + * @mbox_cmsg_types: cmsgs which can be passed through the mailbox 488 + * @crypto_ops: supported crypto operations 489 + * @crypto_enable_off: offset of crypto ops enable region 502 490 */ 503 491 struct nfp_net_tlv_caps { 504 492 u32 me_freq_mhz; 505 493 unsigned int mbox_off; 506 494 unsigned int mbox_len; 507 495 u32 repr_cap; 496 + u32 mbox_cmsg_types; 497 + u32 crypto_ops; 498 + unsigned int crypto_enable_off; 508 499 }; 509 500 510 501 int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
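
The CRYPTO_OPS TLV therefore carries two bitmaps back to back: 16B of
capability bits followed by 16B of writable enable bits, which is where
crypto_enable_off points (see the parsing in nfp_net_ctrl.c above). A
sketch of flipping one opcode's enable bit; this mirrors
nfp_net_crypto_set_op() in crypto/tls.c, the function name is
illustrative:

    static void my_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
    {
    	/* byte index of the opcode, rounded down to a 4-byte word */
    	u32 off = nn->tlv_caps.crypto_enable_off +
    		  round_down(opcode / 8, 4);
    	u32 val = nn_readl(nn, off);

    	if (on)
    		val |= BIT(opcode & 31);
    	else
    		val &= ~BIT(opcode & 31);
    	nn_writel(nn, off, val);
    	/* takes effect after a reconfig with NFP_NET_CFG_UPDATE_CRYPTO */
    }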
+14 -2
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
··· 150 150 151 151 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) 152 152 #define NN_ET_SWITCH_STATS_LEN 9 153 - #define NN_RVEC_GATHER_STATS 9 153 + #define NN_RVEC_GATHER_STATS 12 154 154 #define NN_RVEC_PER_Q_STATS 3 155 + #define NN_CTRL_PATH_STATS 1 155 156 156 157 #define SFP_SFF_REV_COMPLIANCE 1 157 158 ··· 424 423 { 425 424 struct nfp_net *nn = netdev_priv(netdev); 426 425 427 - return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS; 426 + return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS + 427 + NN_CTRL_PATH_STATS; 428 428 } 429 429 430 430 static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) ··· 448 446 data = nfp_pr_et(data, "hw_tx_inner_csum"); 449 447 data = nfp_pr_et(data, "tx_gather"); 450 448 data = nfp_pr_et(data, "tx_lso"); 449 + data = nfp_pr_et(data, "tx_tls_encrypted"); 450 + data = nfp_pr_et(data, "tx_tls_ooo"); 451 + data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); 452 + 453 + data = nfp_pr_et(data, "hw_tls_no_space"); 451 454 452 455 return data; 453 456 } ··· 485 478 tmp[6] = nn->r_vecs[i].hw_csum_tx_inner; 486 479 tmp[7] = nn->r_vecs[i].tx_gather; 487 480 tmp[8] = nn->r_vecs[i].tx_lso; 481 + tmp[9] = nn->r_vecs[i].hw_tls_tx; 482 + tmp[10] = nn->r_vecs[i].tls_tx_fallback; 483 + tmp[11] = nn->r_vecs[i].tls_tx_no_fallback; 488 484 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); 489 485 490 486 data += NN_RVEC_PER_Q_STATS; ··· 498 488 499 489 for (j = 0; j < NN_RVEC_GATHER_STATS; j++) 500 490 *data++ = gathered_stats[j]; 491 + 492 + *data++ = atomic_read(&nn->ktls_no_space); 501 493 502 494 return data; 503 495 }
+25 -7
include/net/tls.h
··· 40 40 #include <linux/socket.h> 41 41 #include <linux/tcp.h> 42 42 #include <linux/skmsg.h> 43 + #include <linux/netdevice.h> 43 44 44 45 #include <net/tcp.h> 45 46 #include <net/strparser.h> ··· 198 197 199 198 struct scatterlist sg_tx_data[MAX_SKB_FRAGS]; 200 199 void (*sk_destruct)(struct sock *sk); 201 - u8 driver_state[]; 200 + u8 driver_state[] __aligned(8); 202 201 /* The TLS layer reserves room for driver specific state 203 202 * Currently the belief is that there is not enough 204 203 * driver specific state to justify another layer of indirection 205 204 */ 206 - #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *))) 205 + #define TLS_DRIVER_STATE_SIZE_TX 16 207 206 }; 208 207 209 208 #define TLS_OFFLOAD_CONTEXT_SIZE_TX \ 210 - (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \ 211 - TLS_DRIVER_STATE_SIZE) 209 + (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX) 212 210 213 211 struct cipher_context { 214 212 char *iv; ··· 302 302 /* sw must be the first member of tls_offload_context_rx */ 303 303 struct tls_sw_context_rx sw; 304 304 atomic64_t resync_req; 305 - u8 driver_state[]; 305 + u8 driver_state[] __aligned(8); 306 306 /* The TLS layer reserves room for driver specific state 307 307 * Currently the belief is that there is not enough 308 308 * driver specific state to justify another layer of indirection 309 309 */ 310 + #define TLS_DRIVER_STATE_SIZE_RX 8 310 311 }; 311 312 312 313 #define TLS_OFFLOAD_CONTEXT_SIZE_RX \ 313 - (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \ 314 - TLS_DRIVER_STATE_SIZE) 314 + (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX) 315 315 316 316 int wait_on_pending_writer(struct sock *sk, long *timeo); 317 317 int tls_sk_query(struct sock *sk, int optname, char __user *optval, ··· 556 556 return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx; 557 557 } 558 558 559 + #if IS_ENABLED(CONFIG_TLS_DEVICE) 560 + static inline void *__tls_driver_ctx(struct tls_context *tls_ctx, 561 + enum tls_offload_ctx_dir direction) 562 + { 563 + if (direction == TLS_OFFLOAD_CTX_DIR_TX) 564 + return tls_offload_ctx_tx(tls_ctx)->driver_state; 565 + else 566 + return tls_offload_ctx_rx(tls_ctx)->driver_state; 567 + } 568 + 569 + static inline void * 570 + tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) 571 + { 572 + return __tls_driver_ctx(tls_get_ctx(sk), direction); 573 + } 574 + #endif 575 + 559 576 /* The TLS context is valid until sk_destruct is called */ 560 577 static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) 561 578 { ··· 590 573 int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); 591 574 int decrypt_skb(struct sock *sk, struct sk_buff *skb, 592 575 struct scatterlist *sgout); 576 + struct sk_buff *tls_encrypt_skb(struct sk_buff *skb); 593 577 594 578 struct sk_buff *tls_validate_xmit_skb(struct sock *sk, 595 579 struct net_device *dev,
+6
net/tls/tls_device_fallback.c
··· 426 426 } 427 427 EXPORT_SYMBOL_GPL(tls_validate_xmit_skb); 428 428 429 + struct sk_buff *tls_encrypt_skb(struct sk_buff *skb) 430 + { 431 + return tls_sw_fallback(skb->sk, skb); 432 + } 433 + EXPORT_SYMBOL_GPL(tls_encrypt_skb); 434 + 429 435 int tls_sw_fallback_init(struct sock *sk, 430 436 struct tls_offload_context_tx *offload_ctx, 431 437 struct tls_crypto_info *crypto_info)