drivers/infiniband/core/netlink.c at v4.14
/*
 * Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved.
 * Copyright (c) 2010 Voltaire Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/export.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
#include <linux/module.h>
#include "core_priv.h"

static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
/* Per-client callback tables, indexed by RDMA netlink client ID. */
static struct {
	const struct rdma_nl_cbs   *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];

/* Returns 0 if the multicast group has listeners, -1 otherwise. */
int rdma_nl_chk_listeners(unsigned int group)
{
	return (netlink_has_listeners(nls, group)) ? 0 : -1;
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);

static bool is_nl_msg_valid(unsigned int type, unsigned int op)
{
	static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
		[RDMA_NL_RDMA_CM] = RDMA_NL_RDMA_CM_NUM_OPS,
		[RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
		[RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
		[RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
	};

	/*
	 * This BUILD_BUG_ON is intended to catch the addition of a new
	 * RDMA netlink protocol without updating the array above.
	 */
	BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);

	if (type >= RDMA_NL_NUM_CLIENTS)
		return false;

	return (op < max_num_ops[type]) ? true : false;
}

static bool is_nl_valid(unsigned int type, unsigned int op)
{
	const struct rdma_nl_cbs *cb_table;

	if (!is_nl_msg_valid(type, op))
		return false;

	cb_table = rdma_nl_types[type].cb_table;
#ifdef CONFIG_MODULES
	if (!cb_table) {
		/*
		 * The caller holds rdma_nl_mutex. Drop it while loading the
		 * module, because the subsystem's init path registers its
		 * callback table through rdma_nl_register(), which takes
		 * the same mutex.
		 */
		mutex_unlock(&rdma_nl_mutex);
		request_module("rdma-netlink-subsys-%d", type);
		mutex_lock(&rdma_nl_mutex);
		cb_table = rdma_nl_types[type].cb_table;
	}
#endif

	if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
		return false;
	return true;
}

void rdma_nl_register(unsigned int index,
		      const struct rdma_nl_cbs cb_table[])
{
	mutex_lock(&rdma_nl_mutex);
	if (!is_nl_msg_valid(index, 0)) {
		/*
		 * Clients are not interested in the success/failure of
		 * this call. They want to see a message in the error log
		 * and continue their initialization. Print a warning for
		 * them, because reaching here is a programmer's error.
		 */
		mutex_unlock(&rdma_nl_mutex);
		WARN(true,
		     "The not-valid %u index was supplied to RDMA netlink\n",
		     index);
		return;
	}

	if (rdma_nl_types[index].cb_table) {
		mutex_unlock(&rdma_nl_mutex);
		WARN(true,
		     "The %u index is already registered in RDMA netlink\n",
		     index);
		return;
	}

	rdma_nl_types[index].cb_table = cb_table;
	mutex_unlock(&rdma_nl_mutex);
}
EXPORT_SYMBOL(rdma_nl_register);

void rdma_nl_unregister(unsigned int index)
{
	mutex_lock(&rdma_nl_mutex);
	rdma_nl_types[index].cb_table = NULL;
	mutex_unlock(&rdma_nl_mutex);
}
EXPORT_SYMBOL(rdma_nl_unregister);

void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
		   int len, int client, int op, int flags)
{
	*nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
	if (!*nlh)
		return NULL;
	return nlmsg_data(*nlh);
}
EXPORT_SYMBOL(ibnl_put_msg);

int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
		  int len, void *data, int type)
{
	if (nla_put(skb, type, len, data)) {
		nlmsg_cancel(skb, nlh);
		return -EMSGSIZE;
	}
	return 0;
}
EXPORT_SYMBOL(ibnl_put_attr);

static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	int type = nlh->nlmsg_type;
	unsigned int index = RDMA_NL_GET_CLIENT(type);
	unsigned int op = RDMA_NL_GET_OP(type);
	const struct rdma_nl_cbs *cb_table;

	if (!is_nl_valid(index, op))
		return -EINVAL;

	cb_table = rdma_nl_types[index].cb_table;

	if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/*
	 * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
	 * mistakenly call the .dump() function.
	 */
	if (index == RDMA_NL_LS) {
		if (cb_table[op].doit)
			return cb_table[op].doit(skb, nlh, extack);
		return -EINVAL;
	}
	/* FIXME: Convert IWCM to properly handle doit callbacks */
	if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
	    index == RDMA_NL_IWCM) {
		struct netlink_dump_control c = {
			.dump = cb_table[op].dump,
		};
		if (c.dump)
			return netlink_dump_start(nls, skb, nlh, &c);
		return -EINVAL;
	}

	if (cb_table[op].doit)
		return cb_table[op].doit(skb, nlh, extack);

	return 0;
}

/*
 * This function is similar to netlink_rcv_skb with one exception:
 * it also calls the callback for netlink messages without the NLM_F_REQUEST
 * flag. Such messages are intended for the RDMA_NL_LS consumer, so they are
 * allowed for that consumer only.
 */
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						struct nlmsghdr *,
						struct netlink_ext_ack *))
{
	struct netlink_ext_ack extack = {};
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/*
		 * Generally speaking, only requests are handled by the
		 * kernel, but RDMA_NL_LS is different: it runs a reversed
		 * netlink scheme. The kernel initiates the messages and
		 * waits for replies carrying the data needed to keep the
		 * pathrecord cache in sync.
		 */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
		    (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh, &extack);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err, &extack);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}

static void rdma_nl_rcv(struct sk_buff *skb)
{
	mutex_lock(&rdma_nl_mutex);
	rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
	mutex_unlock(&rdma_nl_mutex);
}

int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
{
	int err;

	err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
	return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast);

int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
{
	int err;

	err = netlink_unicast(nls, skb, pid, 0);
	return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast_wait);

int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
{
	return nlmsg_multicast(nls, skb, 0, group, flags);
}
EXPORT_SYMBOL(rdma_nl_multicast);

int __init rdma_nl_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input = rdma_nl_rcv,
	};

	nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
	if (!nls)
		return -ENOMEM;

	nls->sk_sndtimeo = 10 * HZ;
	return 0;
}

void rdma_nl_exit(void)
{
	int idx;

	for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
		rdma_nl_unregister(idx);

	netlink_kernel_release(nls);
}

MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
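
Usage sketch (not part of the file above): how a kernel-side client might hand its callback table to rdma_nl_register() and tear it down again. The handler names and the module wrapper are hypothetical, and RDMA_NL_NLDEV is only borrowed as an example client index (in a real v4.14 kernel that index is already owned by the nldev client); only the rdma_nl_register()/rdma_nl_unregister() calls and the struct rdma_nl_cbs fields come from the API shown above.

/*
 * Illustrative sketch only: demo_* names are made up; the registration
 * pattern mirrors how existing clients (nldev, iwcm) use this API.
 */
#include <linux/module.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

static int demo_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	/* Handle a single RDMA_NLDEV_CMD_GET request message. */
	return 0;
}

static int demo_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Handle an NLM_F_DUMP request; invoked via netlink_dump_start(). */
	return 0;
}

static const struct rdma_nl_cbs demo_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = demo_get_doit,
		.dump = demo_get_dumpit,
		/* .flags = RDMA_NL_ADMIN_PERM would require CAP_NET_ADMIN */
	},
};

static int __init demo_init(void)
{
	/* Claim the client index; rdma_nl_rcv_msg() then dispatches to us. */
	rdma_nl_register(RDMA_NL_NLDEV, demo_cb_table);
	return 0;
}

static void __exit demo_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");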
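
A second sketch, showing the typical in-kernel send path through the exported helpers: check for listeners, build the header with ibnl_put_msg(), append an attribute with ibnl_put_attr(), then push the skb with rdma_nl_multicast(). The function name, payload, and attribute type below are made-up placeholders; the helper signatures are the ones defined in this file, and the client/op constants are reused only for illustration.

/*
 * Hypothetical sender: DEMO_ATTR_NAME and demo_send_notice() do not exist
 * anywhere in the kernel; they only demonstrate the helper calls above.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

#define DEMO_ATTR_NAME 1	/* hypothetical attribute type */

static int demo_send_notice(unsigned int group, u32 seq, const char *name)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len = strlen(name) + 1;

	/* Skip the allocation entirely if nobody joined the group. */
	if (rdma_nl_chk_listeners(group))
		return -ESRCH;

	skb = nlmsg_new(nla_total_size(len), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Header; client/op are packed into nlmsg_type by RDMA_NL_GET_TYPE(). */
	if (!ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_NLDEV,
			  RDMA_NLDEV_CMD_GET, 0)) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* One attribute; ibnl_put_attr() cancels the message on overflow. */
	if (ibnl_put_attr(skb, nlh, len, (void *)name, DEMO_ATTR_NAME)) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	nlmsg_end(skb, nlh);
	return rdma_nl_multicast(skb, group, GFP_KERNEL);
}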