···11+/*22+ * Copyright (c) 2014 Intel Corporation. All rights reserved.33+ * Copyright (c) 2014 Chelsio, Inc. All rights reserved.44+ *55+ * This software is available to you under a choice of one of two66+ * licenses. You may choose to be licensed under the terms of the GNU77+ * General Public License (GPL) Version 2, available from the file88+ * COPYING in the main directory of this source tree, or the99+ * OpenIB.org BSD license below:1010+ *1111+ * Redistribution and use in source and binary forms, with or1212+ * without modification, are permitted provided that the following1313+ * conditions are met:1414+ *1515+ * - Redistributions of source code must retain the above1616+ * copyright notice, this list of conditions and the following1717+ * disclaimer.1818+ *1919+ * - Redistributions in binary form must reproduce the above2020+ * copyright notice, this list of conditions and the following2121+ * disclaimer in the documentation and/or other materials2222+ * provided with the distribution.2323+ *2424+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2525+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2626+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2727+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2828+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN2929+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3030+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3131+ * SOFTWARE.3232+ */3333+3434+#include "iwpm_util.h"3535+3636+static const char iwpm_ulib_name[] = "iWarpPortMapperUser";3737+static int iwpm_ulib_version = 3;3838+static int iwpm_user_pid = IWPM_PID_UNDEFINED;3939+static atomic_t echo_nlmsg_seq;4040+4141+int iwpm_valid_pid(void)4242+{4343+ return iwpm_user_pid > 0;4444+}4545+EXPORT_SYMBOL(iwpm_valid_pid);4646+4747+/*4848+ * iwpm_register_pid - Send a netlink query to user space4949+ * for the iwarp port mapper pid5050+ *5151+ * nlmsg attributes:5252+ * [IWPM_NLA_REG_PID_SEQ]5353+ * [IWPM_NLA_REG_IF_NAME]5454+ * [IWPM_NLA_REG_IBDEV_NAME]5555+ * [IWPM_NLA_REG_ULIB_NAME]5656+ */5757+int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)5858+{5959+ struct sk_buff *skb = NULL;6060+ struct iwpm_nlmsg_request *nlmsg_request = NULL;6161+ struct nlmsghdr *nlh;6262+ u32 msg_seq;6363+ const char *err_str = "";6464+ int ret = -EINVAL;6565+6666+ if (!iwpm_valid_client(nl_client)) {6767+ err_str = "Invalid port mapper client";6868+ goto pid_query_error;6969+ }7070+ if (iwpm_registered_client(nl_client))7171+ return 0;7272+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);7373+ if (!skb) {7474+ err_str = "Unable to create a nlmsg";7575+ goto pid_query_error;7676+ }7777+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();7878+ nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL);7979+ if (!nlmsg_request) {8080+ err_str = "Unable to allocate netlink request";8181+ goto pid_query_error;8282+ }8383+ msg_seq = atomic_read(&echo_nlmsg_seq);8484+8585+ /* fill in the pid request message */8686+ err_str = "Unable to put attribute of the nlmsg";8787+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);8888+ if 
(ret)8989+ goto pid_query_error;9090+ ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,9191+ pm_msg->if_name, IWPM_NLA_REG_IF_NAME);9292+ if (ret)9393+ goto pid_query_error;9494+ ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,9595+ pm_msg->dev_name, IWPM_NLA_REG_IBDEV_NAME);9696+ if (ret)9797+ goto pid_query_error;9898+ ret = ibnl_put_attr(skb, nlh, IWPM_ULIBNAME_SIZE,9999+ (char *)iwpm_ulib_name, IWPM_NLA_REG_ULIB_NAME);100100+ if (ret)101101+ goto pid_query_error;102102+103103+ pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",104104+ __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);105105+106106+ ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);107107+ if (ret) {108108+ skb = NULL; /* skb is freed in the netlink send-op handling */109109+ iwpm_set_registered(nl_client, 1);110110+ iwpm_user_pid = IWPM_PID_UNAVAILABLE;111111+ err_str = "Unable to send a nlmsg";112112+ goto pid_query_error;113113+ }114114+ nlmsg_request->req_buffer = pm_msg;115115+ ret = iwpm_wait_complete_req(nlmsg_request);116116+ return ret;117117+pid_query_error:118118+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);119119+ if (skb)120120+ dev_kfree_skb(skb);121121+ if (nlmsg_request)122122+ iwpm_free_nlmsg_request(&nlmsg_request->kref);123123+ return ret;124124+}125125+EXPORT_SYMBOL(iwpm_register_pid);126126+127127+/*128128+ * iwpm_add_mapping - Send a netlink add mapping message129129+ * to the port mapper130130+ * nlmsg attributes:131131+ * [IWPM_NLA_MANAGE_MAPPING_SEQ]132132+ * [IWPM_NLA_MANAGE_ADDR]133133+ */134134+int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)135135+{136136+ struct sk_buff *skb = NULL;137137+ struct iwpm_nlmsg_request *nlmsg_request = NULL;138138+ struct nlmsghdr *nlh;139139+ u32 msg_seq;140140+ const char *err_str = "";141141+ int ret = -EINVAL;142142+143143+ if (!iwpm_valid_client(nl_client)) {144144+ err_str = "Invalid port mapper client";145145+ goto add_mapping_error;146146+ }147147+ if 
(!iwpm_registered_client(nl_client)) {148148+ err_str = "Unregistered port mapper client";149149+ goto add_mapping_error;150150+ }151151+ if (!iwpm_valid_pid())152152+ return 0;153153+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);154154+ if (!skb) {155155+ err_str = "Unable to create a nlmsg";156156+ goto add_mapping_error;157157+ }158158+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();159159+ nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq, nl_client, GFP_KERNEL);160160+ if (!nlmsg_request) {161161+ err_str = "Unable to allocate netlink request";162162+ goto add_mapping_error;163163+ }164164+ msg_seq = atomic_read(&echo_nlmsg_seq);165165+ /* fill in the add mapping message */166166+ err_str = "Unable to put attribute of the nlmsg";167167+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,168168+ IWPM_NLA_MANAGE_MAPPING_SEQ);169169+ if (ret)170170+ goto add_mapping_error;171171+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),172172+ &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);173173+ if (ret)174174+ goto add_mapping_error;175175+ nlmsg_request->req_buffer = pm_msg;176176+177177+ ret = ibnl_unicast(skb, nlh, iwpm_user_pid);178178+ if (ret) {179179+ skb = NULL; /* skb is freed in the netlink send-op handling */180180+ iwpm_user_pid = IWPM_PID_UNDEFINED;181181+ err_str = "Unable to send a nlmsg";182182+ goto add_mapping_error;183183+ }184184+ ret = iwpm_wait_complete_req(nlmsg_request);185185+ return ret;186186+add_mapping_error:187187+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);188188+ if (skb)189189+ dev_kfree_skb(skb);190190+ if (nlmsg_request)191191+ iwpm_free_nlmsg_request(&nlmsg_request->kref);192192+ return ret;193193+}194194+EXPORT_SYMBOL(iwpm_add_mapping);195195+196196+/*197197+ * iwpm_add_and_query_mapping - Send a netlink add and query198198+ * mapping message to the port mapper199199+ * nlmsg attributes:200200+ * [IWPM_NLA_QUERY_MAPPING_SEQ]201201+ * [IWPM_NLA_QUERY_LOCAL_ADDR]202202+ * 
[IWPM_NLA_QUERY_REMOTE_ADDR]203203+ */204204+int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)205205+{206206+ struct sk_buff *skb = NULL;207207+ struct iwpm_nlmsg_request *nlmsg_request = NULL;208208+ struct nlmsghdr *nlh;209209+ u32 msg_seq;210210+ const char *err_str = "";211211+ int ret = -EINVAL;212212+213213+ if (!iwpm_valid_client(nl_client)) {214214+ err_str = "Invalid port mapper client";215215+ goto query_mapping_error;216216+ }217217+ if (!iwpm_registered_client(nl_client)) {218218+ err_str = "Unregistered port mapper client";219219+ goto query_mapping_error;220220+ }221221+ if (!iwpm_valid_pid())222222+ return 0;223223+ ret = -ENOMEM;224224+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);225225+ if (!skb) {226226+ err_str = "Unable to create a nlmsg";227227+ goto query_mapping_error;228228+ }229229+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();230230+ nlmsg_request = iwpm_get_nlmsg_request(nlh->nlmsg_seq,231231+ nl_client, GFP_KERNEL);232232+ if (!nlmsg_request) {233233+ err_str = "Unable to allocate netlink request";234234+ goto query_mapping_error;235235+ }236236+ msg_seq = atomic_read(&echo_nlmsg_seq);237237+238238+ /* fill in the query message */239239+ err_str = "Unable to put attribute of the nlmsg";240240+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,241241+ IWPM_NLA_QUERY_MAPPING_SEQ);242242+ if (ret)243243+ goto query_mapping_error;244244+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),245245+ &pm_msg->loc_addr, IWPM_NLA_QUERY_LOCAL_ADDR);246246+ if (ret)247247+ goto query_mapping_error;248248+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),249249+ &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);250250+ if (ret)251251+ goto query_mapping_error;252252+ nlmsg_request->req_buffer = pm_msg;253253+254254+ ret = ibnl_unicast(skb, nlh, iwpm_user_pid);255255+ if (ret) {256256+ skb = NULL; /* skb is freed in the netlink send-op handling */257257+ err_str = "Unable to send a 
nlmsg";258258+ goto query_mapping_error;259259+ }260260+ ret = iwpm_wait_complete_req(nlmsg_request);261261+ return ret;262262+query_mapping_error:263263+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);264264+ if (skb)265265+ dev_kfree_skb(skb);266266+ if (nlmsg_request)267267+ iwpm_free_nlmsg_request(&nlmsg_request->kref);268268+ return ret;269269+}270270+EXPORT_SYMBOL(iwpm_add_and_query_mapping);271271+272272+/*273273+ * iwpm_remove_mapping - Send a netlink remove mapping message274274+ * to the port mapper275275+ * nlmsg attributes:276276+ * [IWPM_NLA_MANAGE_MAPPING_SEQ]277277+ * [IWPM_NLA_MANAGE_ADDR]278278+ */279279+int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)280280+{281281+ struct sk_buff *skb = NULL;282282+ struct nlmsghdr *nlh;283283+ u32 msg_seq;284284+ const char *err_str = "";285285+ int ret = -EINVAL;286286+287287+ if (!iwpm_valid_client(nl_client)) {288288+ err_str = "Invalid port mapper client";289289+ goto remove_mapping_error;290290+ }291291+ if (!iwpm_registered_client(nl_client)) {292292+ err_str = "Unregistered port mapper client";293293+ goto remove_mapping_error;294294+ }295295+ if (!iwpm_valid_pid())296296+ return 0;297297+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);298298+ if (!skb) {299299+ ret = -ENOMEM;300300+ err_str = "Unable to create a nlmsg";301301+ goto remove_mapping_error;302302+ }303303+ msg_seq = atomic_read(&echo_nlmsg_seq);304304+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();305305+ err_str = "Unable to put attribute of the nlmsg";306306+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq,307307+ IWPM_NLA_MANAGE_MAPPING_SEQ);308308+ if (ret)309309+ goto remove_mapping_error;310310+ ret = ibnl_put_attr(skb, nlh, sizeof(struct sockaddr_storage),311311+ local_addr, IWPM_NLA_MANAGE_ADDR);312312+ if (ret)313313+ goto remove_mapping_error;314314+315315+ ret = ibnl_unicast(skb, nlh, iwpm_user_pid);316316+ if (ret) {317317+ skb = NULL; /* skb is freed in the netlink 
send-op handling */318318+ iwpm_user_pid = IWPM_PID_UNDEFINED;319319+ err_str = "Unable to send a nlmsg";320320+ goto remove_mapping_error;321321+ }322322+ iwpm_print_sockaddr(local_addr,323323+ "remove_mapping: Local sockaddr:");324324+ return 0;325325+remove_mapping_error:326326+ pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);327327+ if (skb)328328+ dev_kfree_skb_any(skb);329329+ return ret;330330+}331331+EXPORT_SYMBOL(iwpm_remove_mapping);332332+333333+/* netlink attribute policy for the received response to register pid request */334334+static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {335335+ [IWPM_NLA_RREG_PID_SEQ] = { .type = NLA_U32 },336336+ [IWPM_NLA_RREG_IBDEV_NAME] = { .type = NLA_STRING,337337+ .len = IWPM_DEVNAME_SIZE - 1 },338338+ [IWPM_NLA_RREG_ULIB_NAME] = { .type = NLA_STRING,339339+ .len = IWPM_ULIBNAME_SIZE - 1 },340340+ [IWPM_NLA_RREG_ULIB_VER] = { .type = NLA_U16 },341341+ [IWPM_NLA_RREG_PID_ERR] = { .type = NLA_U16 }342342+};343343+344344+/*345345+ * iwpm_register_pid_cb - Process a port mapper response to346346+ * iwpm_register_pid()347347+ */348348+int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)349349+{350350+ struct iwpm_nlmsg_request *nlmsg_request = NULL;351351+ struct nlattr *nltb[IWPM_NLA_RREG_PID_MAX];352352+ struct iwpm_dev_data *pm_msg;353353+ char *dev_name, *iwpm_name;354354+ u32 msg_seq;355355+ u8 nl_client;356356+ u16 iwpm_version;357357+ const char *msg_type = "Register Pid response";358358+359359+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RREG_PID_MAX,360360+ resp_reg_policy, nltb, msg_type))361361+ return -EINVAL;362362+363363+ msg_seq = nla_get_u32(nltb[IWPM_NLA_RREG_PID_SEQ]);364364+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);365365+ if (!nlmsg_request) {366366+ pr_info("%s: Could not find a matching request (seq = %u)\n",367367+ __func__, msg_seq);368368+ return -EINVAL;369369+ }370370+ pm_msg = nlmsg_request->req_buffer;371371+ nl_client = 
nlmsg_request->nl_client;372372+ dev_name = (char *)nla_data(nltb[IWPM_NLA_RREG_IBDEV_NAME]);373373+ iwpm_name = (char *)nla_data(nltb[IWPM_NLA_RREG_ULIB_NAME]);374374+ iwpm_version = nla_get_u16(nltb[IWPM_NLA_RREG_ULIB_VER]);375375+376376+ /* check device name, ulib name and version */377377+ if (strcmp(pm_msg->dev_name, dev_name) ||378378+ strcmp(iwpm_ulib_name, iwpm_name) ||379379+ iwpm_version != iwpm_ulib_version) {380380+381381+ pr_info("%s: Incorrect info (dev = %s name = %s version = %d)\n",382382+ __func__, dev_name, iwpm_name, iwpm_version);383383+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;384384+ goto register_pid_response_exit;385385+ }386386+ iwpm_user_pid = cb->nlh->nlmsg_pid;387387+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);388388+ pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",389389+ __func__, iwpm_user_pid);390390+ if (iwpm_valid_client(nl_client))391391+ iwpm_set_registered(nl_client, 1);392392+register_pid_response_exit:393393+ nlmsg_request->request_done = 1;394394+ /* always for found nlmsg_request */395395+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);396396+ barrier();397397+ wake_up(&nlmsg_request->waitq);398398+ return 0;399399+}400400+EXPORT_SYMBOL(iwpm_register_pid_cb);401401+402402+/* netlink attribute policy for the received response to add mapping request */403403+static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {404404+ [IWPM_NLA_MANAGE_MAPPING_SEQ] = { .type = NLA_U32 },405405+ [IWPM_NLA_MANAGE_ADDR] = { .len = sizeof(struct sockaddr_storage) },406406+ [IWPM_NLA_MANAGE_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },407407+ [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 }408408+};409409+410410+/*411411+ * iwpm_add_mapping_cb - Process a port mapper response to412412+ * iwpm_add_mapping()413413+ */414414+int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)415415+{416416+ struct iwpm_sa_data *pm_msg;417417+ struct 
iwpm_nlmsg_request *nlmsg_request = NULL;418418+ struct nlattr *nltb[IWPM_NLA_RMANAGE_MAPPING_MAX];419419+ struct sockaddr_storage *local_sockaddr;420420+ struct sockaddr_storage *mapped_sockaddr;421421+ const char *msg_type;422422+ u32 msg_seq;423423+424424+ msg_type = "Add Mapping response";425425+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RMANAGE_MAPPING_MAX,426426+ resp_add_policy, nltb, msg_type))427427+ return -EINVAL;428428+429429+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);430430+431431+ msg_seq = nla_get_u32(nltb[IWPM_NLA_MANAGE_MAPPING_SEQ]);432432+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);433433+ if (!nlmsg_request) {434434+ pr_info("%s: Could not find a matching request (seq = %u)\n",435435+ __func__, msg_seq);436436+ return -EINVAL;437437+ }438438+ pm_msg = nlmsg_request->req_buffer;439439+ local_sockaddr = (struct sockaddr_storage *)440440+ nla_data(nltb[IWPM_NLA_MANAGE_ADDR]);441441+ mapped_sockaddr = (struct sockaddr_storage *)442442+ nla_data(nltb[IWPM_NLA_MANAGE_MAPPED_LOC_ADDR]);443443+444444+ if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr)) {445445+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;446446+ goto add_mapping_response_exit;447447+ }448448+ if (mapped_sockaddr->ss_family != local_sockaddr->ss_family) {449449+ pr_info("%s: Sockaddr family doesn't match the requested one\n",450450+ __func__);451451+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;452452+ goto add_mapping_response_exit;453453+ }454454+ memcpy(&pm_msg->mapped_loc_addr, mapped_sockaddr,455455+ sizeof(*mapped_sockaddr));456456+ iwpm_print_sockaddr(&pm_msg->loc_addr,457457+ "add_mapping: Local sockaddr:");458458+ iwpm_print_sockaddr(&pm_msg->mapped_loc_addr,459459+ "add_mapping: Mapped local sockaddr:");460460+461461+add_mapping_response_exit:462462+ nlmsg_request->request_done = 1;463463+ /* always for found request */464464+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);465465+ barrier();466466+ wake_up(&nlmsg_request->waitq);467467+ return 
0;468468+}469469+EXPORT_SYMBOL(iwpm_add_mapping_cb);470470+471471+/* netlink attribute policy for the response to add and query mapping request */472472+static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {473473+ [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },474474+ [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },475475+ [IWPM_NLA_QUERY_REMOTE_ADDR] = { .len = sizeof(struct sockaddr_storage) },476476+ [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },477477+ [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = { .len = sizeof(struct sockaddr_storage) },478478+ [IWPM_NLA_RQUERY_MAPPING_ERR] = { .type = NLA_U16 }479479+};480480+481481+/*482482+ * iwpm_add_and_query_mapping_cb - Process a port mapper response to483483+ * iwpm_add_and_query_mapping()484484+ */485485+int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,486486+ struct netlink_callback *cb)487487+{488488+ struct iwpm_sa_data *pm_msg;489489+ struct iwpm_nlmsg_request *nlmsg_request = NULL;490490+ struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];491491+ struct sockaddr_storage *local_sockaddr, *remote_sockaddr;492492+ struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;493493+ const char *msg_type;494494+ u32 msg_seq;495495+ u16 err_code;496496+497497+ msg_type = "Query Mapping response";498498+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,499499+ resp_query_policy, nltb, msg_type))500500+ return -EINVAL;501501+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);502502+503503+ msg_seq = nla_get_u32(nltb[IWPM_NLA_QUERY_MAPPING_SEQ]);504504+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);505505+ if (!nlmsg_request) {506506+ pr_info("%s: Could not find a matching request (seq = %u)\n",507507+ __func__, msg_seq);508508+ return -EINVAL;509509+ }510510+ pm_msg = nlmsg_request->req_buffer;511511+ local_sockaddr = (struct sockaddr_storage *)512512+ nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);513513+ remote_sockaddr = (struct 
sockaddr_storage *)514514+ nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);515515+ mapped_loc_sockaddr = (struct sockaddr_storage *)516516+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);517517+ mapped_rem_sockaddr = (struct sockaddr_storage *)518518+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);519519+520520+ err_code = nla_get_u16(nltb[IWPM_NLA_RQUERY_MAPPING_ERR]);521521+ if (err_code == IWPM_REMOTE_QUERY_REJECT) {522522+ pr_info("%s: Received a Reject (pid = %u, echo seq = %u)\n",523523+ __func__, cb->nlh->nlmsg_pid, msg_seq);524524+ nlmsg_request->err_code = IWPM_REMOTE_QUERY_REJECT;525525+ }526526+ if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr) ||527527+ iwpm_compare_sockaddr(remote_sockaddr, &pm_msg->rem_addr)) {528528+ pr_info("%s: Incorrect local sockaddr\n", __func__);529529+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;530530+ goto query_mapping_response_exit;531531+ }532532+ if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||533533+ mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {534534+ pr_info("%s: Sockaddr family doesn't match the requested one\n",535535+ __func__);536536+ nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;537537+ goto query_mapping_response_exit;538538+ }539539+ memcpy(&pm_msg->mapped_loc_addr, mapped_loc_sockaddr,540540+ sizeof(*mapped_loc_sockaddr));541541+ memcpy(&pm_msg->mapped_rem_addr, mapped_rem_sockaddr,542542+ sizeof(*mapped_rem_sockaddr));543543+544544+ iwpm_print_sockaddr(&pm_msg->loc_addr,545545+ "query_mapping: Local sockaddr:");546546+ iwpm_print_sockaddr(&pm_msg->mapped_loc_addr,547547+ "query_mapping: Mapped local sockaddr:");548548+ iwpm_print_sockaddr(&pm_msg->rem_addr,549549+ "query_mapping: Remote sockaddr:");550550+ iwpm_print_sockaddr(&pm_msg->mapped_rem_addr,551551+ "query_mapping: Mapped remote sockaddr:");552552+query_mapping_response_exit:553553+ nlmsg_request->request_done = 1;554554+ /* always for found request */555555+ kref_put(&nlmsg_request->kref, 
iwpm_free_nlmsg_request);556556+ barrier();557557+ wake_up(&nlmsg_request->waitq);558558+ return 0;559559+}560560+EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);561561+562562+/* netlink attribute policy for the received request for mapping info */563563+static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {564564+ [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,565565+ .len = IWPM_ULIBNAME_SIZE - 1 },566566+ [IWPM_NLA_MAPINFO_ULIB_VER] = { .type = NLA_U16 }567567+};568568+569569+/*570570+ * iwpm_mapping_info_cb - Process a port mapper request for mapping info571571+ */572572+int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)573573+{574574+ struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];575575+ const char *msg_type = "Mapping Info response";576576+ int iwpm_pid;577577+ u8 nl_client;578578+ char *iwpm_name;579579+ u16 iwpm_version;580580+ int ret = -EINVAL;581581+582582+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_REQ_MAX,583583+ resp_mapinfo_policy, nltb, msg_type)) {584584+ pr_info("%s: Unable to parse nlmsg\n", __func__);585585+ return ret;586586+ }587587+ iwpm_name = (char *)nla_data(nltb[IWPM_NLA_MAPINFO_ULIB_NAME]);588588+ iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]);589589+ if (strcmp(iwpm_ulib_name, iwpm_name) ||590590+ iwpm_version != iwpm_ulib_version) {591591+ pr_info("%s: Invalid port mapper name = %s version = %d\n",592592+ __func__, iwpm_name, iwpm_version);593593+ return ret;594594+ }595595+ nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);596596+ if (!iwpm_valid_client(nl_client)) {597597+ pr_info("%s: Invalid port mapper client = %d\n",598598+ __func__, nl_client);599599+ return ret;600600+ }601601+ iwpm_set_registered(nl_client, 0);602602+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);603603+ if (!iwpm_mapinfo_available())604604+ return 0;605605+ iwpm_pid = cb->nlh->nlmsg_pid;606606+ pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",607607+ __func__, iwpm_pid);608608+ ret 
= iwpm_send_mapinfo(nl_client, iwpm_pid);609609+ return ret;610610+}611611+EXPORT_SYMBOL(iwpm_mapping_info_cb);612612+613613+/* netlink attribute policy for the received mapping info ack */614614+static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {615615+ [IWPM_NLA_MAPINFO_SEQ] = { .type = NLA_U32 },616616+ [IWPM_NLA_MAPINFO_SEND_NUM] = { .type = NLA_U32 },617617+ [IWPM_NLA_MAPINFO_ACK_NUM] = { .type = NLA_U32 }618618+};619619+620620+/*621621+ * iwpm_ack_mapping_info_cb - Process a port mapper ack for622622+ * the provided mapping info records623623+ */624624+int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)625625+{626626+ struct nlattr *nltb[IWPM_NLA_MAPINFO_NUM_MAX];627627+ u32 mapinfo_send, mapinfo_ack;628628+ const char *msg_type = "Mapping Info Ack";629629+630630+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_MAPINFO_NUM_MAX,631631+ ack_mapinfo_policy, nltb, msg_type))632632+ return -EINVAL;633633+ mapinfo_send = nla_get_u32(nltb[IWPM_NLA_MAPINFO_SEND_NUM]);634634+ mapinfo_ack = nla_get_u32(nltb[IWPM_NLA_MAPINFO_ACK_NUM]);635635+ if (mapinfo_ack != mapinfo_send)636636+ pr_info("%s: Invalid mapinfo number (sent = %u ack-ed = %u)\n",637637+ __func__, mapinfo_send, mapinfo_ack);638638+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);639639+ return 0;640640+}641641+EXPORT_SYMBOL(iwpm_ack_mapping_info_cb);642642+643643+/* netlink attribute policy for the received port mapper error message */644644+static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {645645+ [IWPM_NLA_ERR_SEQ] = { .type = NLA_U32 },646646+ [IWPM_NLA_ERR_CODE] = { .type = NLA_U16 },647647+};648648+649649+/*650650+ * iwpm_mapping_error_cb - Process a port mapper error message651651+ */652652+int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)653653+{654654+ struct iwpm_nlmsg_request *nlmsg_request = NULL;655655+ int nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);656656+ struct nlattr 
*nltb[IWPM_NLA_ERR_MAX];657657+ u32 msg_seq;658658+ u16 err_code;659659+ const char *msg_type = "Mapping Error Msg";660660+661661+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_ERR_MAX,662662+ map_error_policy, nltb, msg_type))663663+ return -EINVAL;664664+665665+ msg_seq = nla_get_u32(nltb[IWPM_NLA_ERR_SEQ]);666666+ err_code = nla_get_u16(nltb[IWPM_NLA_ERR_CODE]);667667+ pr_info("%s: Received msg seq = %u err code = %u client = %d\n",668668+ __func__, msg_seq, err_code, nl_client);669669+ /* look for nlmsg_request */670670+ nlmsg_request = iwpm_find_nlmsg_request(msg_seq);671671+ if (!nlmsg_request) {672672+ /* not all errors have associated requests */673673+ pr_debug("Could not find matching req (seq = %u)\n", msg_seq);674674+ return 0;675675+ }676676+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);677677+ nlmsg_request->err_code = err_code;678678+ nlmsg_request->request_done = 1;679679+ /* always for found request */680680+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);681681+ barrier();682682+ wake_up(&nlmsg_request->waitq);683683+ return 0;684684+}685685+EXPORT_SYMBOL(iwpm_mapping_error_cb);
+607
drivers/infiniband/core/iwpm_util.c
···11+/*22+ * Copyright (c) 2014 Chelsio, Inc. All rights reserved.33+ * Copyright (c) 2014 Intel Corporation. All rights reserved.44+ *55+ * This software is available to you under a choice of one of two66+ * licenses. You may choose to be licensed under the terms of the GNU77+ * General Public License (GPL) Version 2, available from the file88+ * COPYING in the main directory of this source tree, or the99+ * OpenIB.org BSD license below:1010+ *1111+ * Redistribution and use in source and binary forms, with or1212+ * without modification, are permitted provided that the following1313+ * conditions are met:1414+ *1515+ * - Redistributions of source code must retain the above1616+ * copyright notice, this list of conditions and the following1717+ * disclaimer.1818+ *1919+ * - Redistributions in binary form must reproduce the above2020+ * copyright notice, this list of conditions and the following2121+ * disclaimer in the documentation and/or other materials2222+ * provided with the distribution.2323+ *2424+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2525+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2626+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2727+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2828+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN2929+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3030+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3131+ * SOFTWARE.3232+ */3333+3434+#include "iwpm_util.h"3535+3636+#define IWPM_HASH_BUCKET_SIZE 5123737+#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1)3838+3939+static LIST_HEAD(iwpm_nlmsg_req_list);4040+static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);4141+4242+static struct hlist_head *iwpm_hash_bucket;4343+static DEFINE_SPINLOCK(iwpm_mapinfo_lock);4444+4545+static DEFINE_MUTEX(iwpm_admin_lock);4646+static struct iwpm_admin_data iwpm_admin;4747+4848+int iwpm_init(u8 nl_client)4949+{5050+ if (iwpm_valid_client(nl_client))5151+ return -EINVAL;5252+ mutex_lock(&iwpm_admin_lock);5353+ if (atomic_read(&iwpm_admin.refcount) == 0) {5454+ iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *5555+ sizeof(struct hlist_head), GFP_KERNEL);5656+ if (!iwpm_hash_bucket) {5757+ mutex_unlock(&iwpm_admin_lock);5858+ pr_err("%s Unable to create mapinfo hash table\n", __func__);5959+ return -ENOMEM;6060+ }6161+ }6262+ atomic_inc(&iwpm_admin.refcount);6363+ mutex_unlock(&iwpm_admin_lock);6464+ iwpm_set_valid(nl_client, 1);6565+ return 0;6666+}6767+EXPORT_SYMBOL(iwpm_init);6868+6969+static void free_hash_bucket(void);7070+7171+int iwpm_exit(u8 nl_client)7272+{7373+7474+ if (!iwpm_valid_client(nl_client))7575+ return -EINVAL;7676+ mutex_lock(&iwpm_admin_lock);7777+ if (atomic_read(&iwpm_admin.refcount) == 0) {7878+ mutex_unlock(&iwpm_admin_lock);7979+ pr_err("%s Incorrect usage - negative refcount\n", __func__);8080+ return -EINVAL;8181+ }8282+ if (atomic_dec_and_test(&iwpm_admin.refcount)) {8383+ free_hash_bucket();8484+ pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);8585+ }8686+ mutex_unlock(&iwpm_admin_lock);8787+ iwpm_set_valid(nl_client, 0);8888+ return 
0;8989+}9090+EXPORT_SYMBOL(iwpm_exit);9191+9292+static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,9393+ struct sockaddr_storage *);9494+9595+int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,9696+ struct sockaddr_storage *mapped_sockaddr,9797+ u8 nl_client)9898+{9999+ struct hlist_head *hash_bucket_head;100100+ struct iwpm_mapping_info *map_info;101101+ unsigned long flags;102102+103103+ if (!iwpm_valid_client(nl_client))104104+ return -EINVAL;105105+ map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);106106+ if (!map_info) {107107+ pr_err("%s: Unable to allocate a mapping info\n", __func__);108108+ return -ENOMEM;109109+ }110110+ memcpy(&map_info->local_sockaddr, local_sockaddr,111111+ sizeof(struct sockaddr_storage));112112+ memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,113113+ sizeof(struct sockaddr_storage));114114+ map_info->nl_client = nl_client;115115+116116+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);117117+ if (iwpm_hash_bucket) {118118+ hash_bucket_head = get_hash_bucket_head(119119+ &map_info->local_sockaddr,120120+ &map_info->mapped_sockaddr);121121+ hlist_add_head(&map_info->hlist_node, hash_bucket_head);122122+ }123123+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);124124+ return 0;125125+}126126+EXPORT_SYMBOL(iwpm_create_mapinfo);127127+128128+int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,129129+ struct sockaddr_storage *mapped_local_addr)130130+{131131+ struct hlist_node *tmp_hlist_node;132132+ struct hlist_head *hash_bucket_head;133133+ struct iwpm_mapping_info *map_info = NULL;134134+ unsigned long flags;135135+ int ret = -EINVAL;136136+137137+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);138138+ if (iwpm_hash_bucket) {139139+ hash_bucket_head = get_hash_bucket_head(140140+ local_sockaddr,141141+ mapped_local_addr);142142+ hlist_for_each_entry_safe(map_info, tmp_hlist_node,143143+ hash_bucket_head, hlist_node) {144144+145145+ if 
(!iwpm_compare_sockaddr(&map_info->mapped_sockaddr,146146+ mapped_local_addr)) {147147+148148+ hlist_del_init(&map_info->hlist_node);149149+ kfree(map_info);150150+ ret = 0;151151+ break;152152+ }153153+ }154154+ }155155+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);156156+ return ret;157157+}158158+EXPORT_SYMBOL(iwpm_remove_mapinfo);159159+160160+static void free_hash_bucket(void)161161+{162162+ struct hlist_node *tmp_hlist_node;163163+ struct iwpm_mapping_info *map_info;164164+ unsigned long flags;165165+ int i;166166+167167+ /* remove all the mapinfo data from the list */168168+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);169169+ for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {170170+ hlist_for_each_entry_safe(map_info, tmp_hlist_node,171171+ &iwpm_hash_bucket[i], hlist_node) {172172+173173+ hlist_del_init(&map_info->hlist_node);174174+ kfree(map_info);175175+ }176176+ }177177+ /* free the hash list */178178+ kfree(iwpm_hash_bucket);179179+ iwpm_hash_bucket = NULL;180180+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);181181+}182182+183183+struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,184184+ u8 nl_client, gfp_t gfp)185185+{186186+ struct iwpm_nlmsg_request *nlmsg_request = NULL;187187+ unsigned long flags;188188+189189+ nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);190190+ if (!nlmsg_request) {191191+ pr_err("%s Unable to allocate a nlmsg_request\n", __func__);192192+ return NULL;193193+ }194194+ spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);195195+ list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);196196+ spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);197197+198198+ kref_init(&nlmsg_request->kref);199199+ kref_get(&nlmsg_request->kref);200200+ nlmsg_request->nlmsg_seq = nlmsg_seq;201201+ nlmsg_request->nl_client = nl_client;202202+ nlmsg_request->request_done = 0;203203+ nlmsg_request->err_code = 0;204204+ return nlmsg_request;205205+}206206+207207+void iwpm_free_nlmsg_request(struct kref 
*kref)208208+{209209+ struct iwpm_nlmsg_request *nlmsg_request;210210+ unsigned long flags;211211+212212+ nlmsg_request = container_of(kref, struct iwpm_nlmsg_request, kref);213213+214214+ spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);215215+ list_del_init(&nlmsg_request->inprocess_list);216216+ spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);217217+218218+ if (!nlmsg_request->request_done)219219+ pr_debug("%s Freeing incomplete nlmsg request (seq = %u).\n",220220+ __func__, nlmsg_request->nlmsg_seq);221221+ kfree(nlmsg_request);222222+}223223+224224+struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)225225+{226226+ struct iwpm_nlmsg_request *nlmsg_request;227227+ struct iwpm_nlmsg_request *found_request = NULL;228228+ unsigned long flags;229229+230230+ spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);231231+ list_for_each_entry(nlmsg_request, &iwpm_nlmsg_req_list,232232+ inprocess_list) {233233+ if (nlmsg_request->nlmsg_seq == echo_seq) {234234+ found_request = nlmsg_request;235235+ kref_get(&nlmsg_request->kref);236236+ break;237237+ }238238+ }239239+ spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);240240+ return found_request;241241+}242242+243243+int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)244244+{245245+ int ret;246246+ init_waitqueue_head(&nlmsg_request->waitq);247247+248248+ ret = wait_event_timeout(nlmsg_request->waitq,249249+ (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);250250+ if (!ret) {251251+ ret = -EINVAL;252252+ pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",253253+ __func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);254254+ } else {255255+ ret = nlmsg_request->err_code;256256+ }257257+ kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);258258+ return ret;259259+}260260+261261+int iwpm_get_nlmsg_seq(void)262262+{263263+ return atomic_inc_return(&iwpm_admin.nlmsg_seq);264264+}265265+266266+int iwpm_valid_client(u8 nl_client)267267+{268268+ if (nl_client >= 
RDMA_NL_NUM_CLIENTS)269269+ return 0;270270+ return iwpm_admin.client_list[nl_client];271271+}272272+273273+void iwpm_set_valid(u8 nl_client, int valid)274274+{275275+ if (nl_client >= RDMA_NL_NUM_CLIENTS)276276+ return;277277+ iwpm_admin.client_list[nl_client] = valid;278278+}279279+280280+/* valid client */281281+int iwpm_registered_client(u8 nl_client)282282+{283283+ return iwpm_admin.reg_list[nl_client];284284+}285285+286286+/* valid client */287287+void iwpm_set_registered(u8 nl_client, int reg)288288+{289289+ iwpm_admin.reg_list[nl_client] = reg;290290+}291291+292292+int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,293293+ struct sockaddr_storage *b_sockaddr)294294+{295295+ if (a_sockaddr->ss_family != b_sockaddr->ss_family)296296+ return 1;297297+ if (a_sockaddr->ss_family == AF_INET) {298298+ struct sockaddr_in *a4_sockaddr =299299+ (struct sockaddr_in *)a_sockaddr;300300+ struct sockaddr_in *b4_sockaddr =301301+ (struct sockaddr_in *)b_sockaddr;302302+ if (!memcmp(&a4_sockaddr->sin_addr,303303+ &b4_sockaddr->sin_addr, sizeof(struct in_addr))304304+ && a4_sockaddr->sin_port == b4_sockaddr->sin_port)305305+ return 0;306306+307307+ } else if (a_sockaddr->ss_family == AF_INET6) {308308+ struct sockaddr_in6 *a6_sockaddr =309309+ (struct sockaddr_in6 *)a_sockaddr;310310+ struct sockaddr_in6 *b6_sockaddr =311311+ (struct sockaddr_in6 *)b_sockaddr;312312+ if (!memcmp(&a6_sockaddr->sin6_addr,313313+ &b6_sockaddr->sin6_addr, sizeof(struct in6_addr))314314+ && a6_sockaddr->sin6_port == b6_sockaddr->sin6_port)315315+ return 0;316316+317317+ } else {318318+ pr_err("%s: Invalid sockaddr family\n", __func__);319319+ }320320+ return 1;321321+}322322+323323+struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,324324+ int nl_client)325325+{326326+ struct sk_buff *skb = NULL;327327+328328+ skb = dev_alloc_skb(NLMSG_GOODSIZE);329329+ if (!skb) {330330+ pr_err("%s Unable to allocate skb\n", __func__);331331+ goto create_nlmsg_exit;332332+ }333333+ if 
(!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,334334+ NLM_F_REQUEST))) {335335+ pr_warn("%s: Unable to put the nlmsg header\n", __func__);336336+ dev_kfree_skb(skb);337337+ skb = NULL;338338+ }339339+create_nlmsg_exit:340340+ return skb;341341+}342342+343343+int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,344344+ const struct nla_policy *nlmsg_policy,345345+ struct nlattr *nltb[], const char *msg_type)346346+{347347+ int nlh_len = 0;348348+ int ret;349349+ const char *err_str = "";350350+351351+ ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy);352352+ if (ret) {353353+ err_str = "Invalid attribute";354354+ goto parse_nlmsg_error;355355+ }356356+ ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy);357357+ if (ret) {358358+ err_str = "Unable to parse the nlmsg";359359+ goto parse_nlmsg_error;360360+ }361361+ ret = iwpm_validate_nlmsg_attr(nltb, policy_max);362362+ if (ret) {363363+ err_str = "Invalid NULL attribute";364364+ goto parse_nlmsg_error;365365+ }366366+ return 0;367367+parse_nlmsg_error:368368+ pr_warn("%s: %s (msg type %s ret = %d)\n",369369+ __func__, err_str, msg_type, ret);370370+ return ret;371371+}372372+373373+void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg)374374+{375375+ struct sockaddr_in6 *sockaddr_v6;376376+ struct sockaddr_in *sockaddr_v4;377377+378378+ switch (sockaddr->ss_family) {379379+ case AF_INET:380380+ sockaddr_v4 = (struct sockaddr_in *)sockaddr;381381+ pr_debug("%s IPV4 %pI4: %u(0x%04X)\n",382382+ msg, &sockaddr_v4->sin_addr,383383+ ntohs(sockaddr_v4->sin_port),384384+ ntohs(sockaddr_v4->sin_port));385385+ break;386386+ case AF_INET6:387387+ sockaddr_v6 = (struct sockaddr_in6 *)sockaddr;388388+ pr_debug("%s IPV6 %pI6: %u(0x%04X)\n",389389+ msg, &sockaddr_v6->sin6_addr,390390+ ntohs(sockaddr_v6->sin6_port),391391+ ntohs(sockaddr_v6->sin6_port));392392+ break;393393+ default:394394+ break;395395+ }396396+}397397+398398+static u32 iwpm_ipv6_jhash(struct 
sockaddr_in6 *ipv6_sockaddr)399399+{400400+ u32 ipv6_hash = jhash(&ipv6_sockaddr->sin6_addr, sizeof(struct in6_addr), 0);401401+ u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0);402402+ return hash;403403+}404404+405405+static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)406406+{407407+ u32 ipv4_hash = jhash(&ipv4_sockaddr->sin_addr, sizeof(struct in_addr), 0);408408+ u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0);409409+ return hash;410410+}411411+412412+static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage413413+ *local_sockaddr,414414+ struct sockaddr_storage415415+ *mapped_sockaddr)416416+{417417+ u32 local_hash, mapped_hash, hash;418418+419419+ if (local_sockaddr->ss_family == AF_INET) {420420+ local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);421421+ mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);422422+423423+ } else if (local_sockaddr->ss_family == AF_INET6) {424424+ local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);425425+ mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);426426+ } else {427427+ pr_err("%s: Invalid sockaddr family\n", __func__);428428+ return NULL;429429+ }430430+431431+ if (local_hash == mapped_hash) /* if port mapper isn't available */432432+ hash = local_hash;433433+ else434434+ hash = jhash_2words(local_hash, mapped_hash, 0);435435+436436+ return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];437437+}438438+439439+static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)440440+{441441+ struct sk_buff *skb = NULL;442442+ struct nlmsghdr *nlh;443443+ u32 msg_seq;444444+ const char *err_str = "";445445+ int ret = -EINVAL;446446+447447+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client);448448+ if (!skb) {449449+ err_str = "Unable to create a nlmsg";450450+ goto mapinfo_num_error;451451+ }452452+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();453453+ 
msg_seq = 0;454454+ err_str = "Unable to put attribute of mapinfo number nlmsg";455455+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ);456456+ if (ret)457457+ goto mapinfo_num_error;458458+ ret = ibnl_put_attr(skb, nlh, sizeof(u32),459459+ &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);460460+ if (ret)461461+ goto mapinfo_num_error;462462+ ret = ibnl_unicast(skb, nlh, iwpm_pid);463463+ if (ret) {464464+ skb = NULL;465465+ err_str = "Unable to send a nlmsg";466466+ goto mapinfo_num_error;467467+ }468468+ pr_debug("%s: Sent mapping number = %d\n", __func__, mapping_num);469469+ return 0;470470+mapinfo_num_error:471471+ pr_info("%s: %s\n", __func__, err_str);472472+ if (skb)473473+ dev_kfree_skb(skb);474474+ return ret;475475+}476476+477477+static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)478478+{479479+ struct nlmsghdr *nlh = NULL;480480+ int ret = 0;481481+482482+ if (!skb)483483+ return ret;484484+ if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,485485+ RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {486486+ pr_warn("%s Unable to put NLMSG_DONE\n", __func__);487487+ return -ENOMEM;488488+ }489489+ nlh->nlmsg_type = NLMSG_DONE;490490+ ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);491491+ if (ret)492492+ pr_warn("%s Unable to send a nlmsg\n", __func__);493493+ return ret;494494+}495495+496496+int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)497497+{498498+ struct iwpm_mapping_info *map_info;499499+ struct sk_buff *skb = NULL;500500+ struct nlmsghdr *nlh;501501+ int skb_num = 0, mapping_num = 0;502502+ int i = 0, nlmsg_bytes = 0;503503+ unsigned long flags;504504+ const char *err_str = "";505505+ int ret;506506+507507+ skb = dev_alloc_skb(NLMSG_GOODSIZE);508508+ if (!skb) {509509+ ret = -ENOMEM;510510+ err_str = "Unable to allocate skb";511511+ goto send_mapping_info_exit;512512+ }513513+ skb_num++;514514+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);515515+ for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {516516+ 
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],517517+ hlist_node) {518518+ if (map_info->nl_client != nl_client)519519+ continue;520520+ nlh = NULL;521521+ if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,522522+ RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {523523+ ret = -ENOMEM;524524+ err_str = "Unable to put the nlmsg header";525525+ goto send_mapping_info_unlock;526526+ }527527+ err_str = "Unable to put attribute of the nlmsg";528528+ ret = ibnl_put_attr(skb, nlh,529529+ sizeof(struct sockaddr_storage),530530+ &map_info->local_sockaddr,531531+ IWPM_NLA_MAPINFO_LOCAL_ADDR);532532+ if (ret)533533+ goto send_mapping_info_unlock;534534+535535+ ret = ibnl_put_attr(skb, nlh,536536+ sizeof(struct sockaddr_storage),537537+ &map_info->mapped_sockaddr,538538+ IWPM_NLA_MAPINFO_MAPPED_ADDR);539539+ if (ret)540540+ goto send_mapping_info_unlock;541541+542542+ iwpm_print_sockaddr(&map_info->local_sockaddr,543543+ "send_mapping_info: Local sockaddr:");544544+ iwpm_print_sockaddr(&map_info->mapped_sockaddr,545545+ "send_mapping_info: Mapped local sockaddr:");546546+ mapping_num++;547547+ nlmsg_bytes += nlh->nlmsg_len;548548+549549+ /* check if all mappings can fit in one skb */550550+ if (NLMSG_GOODSIZE - nlmsg_bytes < nlh->nlmsg_len * 2) {551551+ /* and leave room for NLMSG_DONE */552552+ nlmsg_bytes = 0;553553+ skb_num++;554554+ spin_unlock_irqrestore(&iwpm_mapinfo_lock,555555+ flags);556556+ /* send the skb */557557+ ret = send_nlmsg_done(skb, nl_client, iwpm_pid);558558+ skb = NULL;559559+ if (ret) {560560+ err_str = "Unable to send map info";561561+ goto send_mapping_info_exit;562562+ }563563+ if (skb_num == IWPM_MAPINFO_SKB_COUNT) {564564+ ret = -ENOMEM;565565+ err_str = "Insufficient skbs for map info";566566+ goto send_mapping_info_exit;567567+ }568568+ skb = dev_alloc_skb(NLMSG_GOODSIZE);569569+ if (!skb) {570570+ ret = -ENOMEM;571571+ err_str = "Unable to allocate skb";572572+ goto send_mapping_info_exit;573573+ }574574+ spin_lock_irqsave(&iwpm_mapinfo_lock, 
flags);575575+ }576576+ }577577+ }578578+send_mapping_info_unlock:579579+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);580580+send_mapping_info_exit:581581+ if (ret) {582582+ pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);583583+ if (skb)584584+ dev_kfree_skb(skb);585585+ return ret;586586+ }587587+ send_nlmsg_done(skb, nl_client, iwpm_pid);588588+ return send_mapinfo_num(mapping_num, nl_client, iwpm_pid);589589+}590590+591591+int iwpm_mapinfo_available(void)592592+{593593+ unsigned long flags;594594+ int full_bucket = 0, i = 0;595595+596596+ spin_lock_irqsave(&iwpm_mapinfo_lock, flags);597597+ if (iwpm_hash_bucket) {598598+ for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {599599+ if (!hlist_empty(&iwpm_hash_bucket[i])) {600600+ full_bucket = 1;601601+ break;602602+ }603603+ }604604+ }605605+ spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);606606+ return full_bucket;607607+}
+238
drivers/infiniband/core/iwpm_util.h
/*
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _IWPM_UTIL_H
#define _IWPM_UTIL_H

#include <linux/module.h>
#include <linux/io.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <net/netlink.h>
#include <linux/errno.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>


#define IWPM_NL_RETRANS		3
#define IWPM_NL_TIMEOUT		(10*HZ)	/* request wait, in jiffies */
#define IWPM_MAPINFO_SKB_COUNT	20	/* max skbs per mapinfo dump */

#define IWPM_PID_UNDEFINED	-1	/* userspace port mapper pid not known yet */
#define IWPM_PID_UNAVAILABLE	-2	/* userspace port mapper not running */

/* In-kernel state of one pending netlink request to the port mapper */
struct iwpm_nlmsg_request {
	struct list_head	inprocess_list;	/* entry on iwpm_nlmsg_req_list */
	__u32			nlmsg_seq;	/* matched against the echoed seq */
	void			*req_buffer;	/* caller-supplied request data */
	u8			nl_client;	/* netlink client index */
	u8			request_done;	/* nonzero once the response arrived */
	u16			err_code;	/* error reported by the response */
	wait_queue_head_t	waitq;		/* requester sleeps here */
	struct kref		kref;		/* freed via iwpm_free_nlmsg_request() */
};

/* One local address <-> mapped address record in the mapinfo hash table */
struct iwpm_mapping_info {
	struct hlist_node hlist_node;
	struct sockaddr_storage local_sockaddr;
	struct sockaddr_storage mapped_sockaddr;
	u8     nl_client;	/* owning netlink client index */
};

/* Global per-module bookkeeping for the port mapper */
struct iwpm_admin_data {
	atomic_t refcount;	/* number of clients that called iwpm_init() */
	atomic_t nlmsg_seq;	/* source of netlink sequence numbers */
	int      client_list[RDMA_NL_NUM_CLIENTS];	/* 1 = client valid */
	int      reg_list[RDMA_NL_NUM_CLIENTS];		/* 1 = client registered */
};

/**
 * iwpm_get_nlmsg_request - Allocate and initialize netlink message request
 * @nlmsg_seq: Sequence number of the netlink message
 * @nl_client: The index of the netlink client
 * @gfp: Indicates how the memory for the request should be allocated
 *
 * Returns the newly allocated netlink request object if successful,
 * otherwise returns NULL
 */
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
						  u8 nl_client, gfp_t gfp);

/**
 * iwpm_free_nlmsg_request - Deallocate netlink message request
 * @kref: Holds reference of netlink message request
 */
void iwpm_free_nlmsg_request(struct kref *kref);

/**
 * iwpm_find_nlmsg_request - Find netlink message request in the request list
 * @echo_seq: Sequence number of the netlink request to find
 *
 * Returns the found netlink message request,
 * if not found, returns NULL
 */
struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq);

/**
 * iwpm_wait_complete_req - Block while servicing the netlink request
 * @nlmsg_request: Netlink message request to service
 *
 * Wakes up, after the request is completed or expired
 * Returns 0 if the request is complete without error
 */
int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);

/**
 * iwpm_get_nlmsg_seq - Get the sequence number for a netlink
 *			message to send to the port mapper
 *
 * Returns the sequence number for the netlink message.
 */
int iwpm_get_nlmsg_seq(void);

/**
 * iwpm_valid_client - Check if the port mapper client is valid
 * @nl_client: The index of the netlink client
 *
 * Valid clients need to call iwpm_init() before using
 * the port mapper
 */
int iwpm_valid_client(u8 nl_client);

/**
 * iwpm_set_valid - Set the port mapper client to valid or not
 * @nl_client: The index of the netlink client
 * @valid: 1 if valid or 0 if invalid
 */
void iwpm_set_valid(u8 nl_client, int valid);

/**
 * iwpm_registered_client - Check if the port mapper client is registered
 * @nl_client: The index of the netlink client
 *
 * Call iwpm_register_pid() to register a client
 */
int iwpm_registered_client(u8 nl_client);

/**
 * iwpm_set_registered - Set the port mapper client to registered or not
 * @nl_client: The index of the netlink client
 * @reg: 1 if registered or 0 if not
 */
void iwpm_set_registered(u8 nl_client, int reg);

/**
 * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
 *		       a client to the user space port mapper
 * @nl_client: The index of the netlink client
 * @iwpm_pid: The pid of the user space port mapper
 *
 * If successful, returns the number of sent mapping info records
 */
int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid);

/**
 * iwpm_mapinfo_available - Check if any mapping info records is available
 *			    in the hash table
 *
 * Returns 1 if mapping information is available, otherwise returns 0
 */
int iwpm_mapinfo_available(void);

/**
 * iwpm_compare_sockaddr - Compare two sockaddr storage structs
 *
 * Returns 0 if they are holding the same ip/tcp address info,
 * otherwise returns 1
 */
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
			  struct sockaddr_storage *b_sockaddr);

/**
 * iwpm_validate_nlmsg_attr - Check for NULL netlink attributes
 * @nltb: Holds address of each netlink message attributes
 * @nla_count: Number of netlink message attributes
 *
 * Returns error if any of the nla_count attributes is NULL
 */
static inline int iwpm_validate_nlmsg_attr(struct nlattr *nltb[],
					   int nla_count)
{
	int i;
	/* attribute 0 is unused by convention, start at 1 */
	for (i = 1; i < nla_count; i++) {
		if (!nltb[i])
			return -EINVAL;
	}
	return 0;
}

/**
 * iwpm_create_nlmsg - Allocate skb and form a netlink message
 * @nl_op: Netlink message opcode
 * @nlh: Holds address of the netlink message header in skb
 * @nl_client: The index of the netlink client
 *
 * Returns the newly allcated skb, or NULL if the tailroom of the skb
 * is insufficient to store the message header and payload
 */
struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
				  int nl_client);

/**
 * iwpm_parse_nlmsg - Validate and parse the received netlink message
 * @cb: Netlink callback structure
 * @policy_max: Maximum attribute type to be expected
 * @nlmsg_policy: Validation policy
 * @nltb: Array to store policy_max parsed elements
 * @msg_type: Type of netlink message
 *
 * Returns 0 on success or a negative error code
 */
int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
		     const struct nla_policy *nlmsg_policy,
		     struct nlattr *nltb[], const char *msg_type);

/**
 * iwpm_print_sockaddr - Print IPv4/IPv6 address and TCP port
 * @sockaddr: Socket address to print
 * @msg: Message to print
 */
void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
#endif
+16-2
drivers/infiniband/core/netlink.c
···103103EXPORT_SYMBOL(ibnl_remove_client);104104105105void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,106106- int len, int client, int op)106106+ int len, int client, int op, int flags)107107{108108 unsigned char *prev_tail;109109110110 prev_tail = skb_tail_pointer(skb);111111 *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),112112- len, NLM_F_MULTI);112112+ len, flags);113113 if (!*nlh)114114 goto out_nlmsg_trim;115115 (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;···171171 netlink_rcv_skb(skb, &ibnl_rcv_msg);172172 mutex_unlock(&ibnl_mutex);173173}174174+175175+int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,176176+ __u32 pid)177177+{178178+ return nlmsg_unicast(nls, skb, pid);179179+}180180+EXPORT_SYMBOL(ibnl_unicast);181181+182182+int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,183183+ unsigned int group, gfp_t flags)184184+{185185+ return nlmsg_multicast(nls, skb, 0, group, flags);186186+}187187+EXPORT_SYMBOL(ibnl_multicast);174188175189int __init ibnl_init(void)176190{
+1-1
drivers/infiniband/core/sa_query.c
···618618619619static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)620620{621621- bool preload = gfp_mask & __GFP_WAIT;621621+ bool preload = !!(gfp_mask & __GFP_WAIT);622622 unsigned long flags;623623 int ret, id;624624
···7070 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {7171 int i;7272 if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&7373- dd->ipath_lastcancel > jiffies) {7373+ time_after(dd->ipath_lastcancel, jiffies)) {7474 __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,7575 "SendbufErrs %lx %lx", sbuf[0],7676 sbuf[1]);···755755756756 /* likely due to cancel; so suppress message unless verbose */757757 if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&758758- dd->ipath_lastcancel > jiffies) {758758+ time_after(dd->ipath_lastcancel, jiffies)) {759759 /* armlaunch takes precedence; it often causes both. */760760 ipath_cdbg(VERBOSE,761761 "Suppressed %s error (%llx) after sendbuf cancel\n",
+2-2
drivers/infiniband/hw/ipath/ipath_sdma.c
···247247248248 /* ipath_sdma_abort() is done, waiting for interrupt */249249 if (status == IPATH_SDMA_ABORT_DISARMED) {250250- if (jiffies < dd->ipath_sdma_abort_intr_timeout)250250+ if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))251251 goto resched_noprint;252252 /* give up, intr got lost somewhere */253253 ipath_dbg("give up waiting for SDMADISABLED intr\n");···341341 * JAG - this is bad to just have default be a loop without342342 * state change343343 */344344- if (jiffies > dd->ipath_sdma_abort_jiffies) {344344+ if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {345345 ipath_dbg("looping with status 0x%08lx\n",346346 dd->ipath_sdma_status);347347 dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
···11/*22- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.22+ * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved.33 *44 * This software is available to you under a choice of one of two55 * licenses. You may choose to be licensed under the terms of the GNU···5959#include <net/route.h>6060#include <net/ip_fib.h>6161#include <net/tcp.h>6262+#include <linux/fcntl.h>62636364#include "nes.h"6465···167166{168167 return rem_ref_cm_node(cm_node->cm_core, cm_node);169168}170170-171169/**172170 * create_event173171 */···482482 iph->ttl = 0x40;483483 iph->protocol = 0x06; /* IPPROTO_TCP */484484485485- iph->saddr = htonl(cm_node->loc_addr);486486- iph->daddr = htonl(cm_node->rem_addr);485485+ iph->saddr = htonl(cm_node->mapped_loc_addr);486486+ iph->daddr = htonl(cm_node->mapped_rem_addr);487487488488- tcph->source = htons(cm_node->loc_port);489489- tcph->dest = htons(cm_node->rem_port);488488+ tcph->source = htons(cm_node->mapped_loc_port);489489+ tcph->dest = htons(cm_node->mapped_rem_port);490490 tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);491491492492 if (flags & SET_ACK) {···523523524524 skb_shinfo(skb)->nr_frags = 0;525525 cm_packets_created++;526526+}527527+528528+/*529529+ * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct530530+ */531531+static void nes_create_sockaddr(__be32 ip_addr, __be16 port,532532+ struct sockaddr_storage *addr)533533+{534534+ struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr;535535+ nes_sockaddr->sin_family = AF_INET;536536+ memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32));537537+ nes_sockaddr->sin_port = port;538538+}539539+540540+/*541541+ * nes_create_mapinfo - Create a mapinfo object in the port mapper data base542542+ */543543+static int nes_create_mapinfo(struct nes_cm_info *cm_info)544544+{545545+ struct sockaddr_storage local_sockaddr;546546+ struct sockaddr_storage mapped_sockaddr;547547+548548+ nes_create_sockaddr(htonl(cm_info->loc_addr), 
htons(cm_info->loc_port),549549+ &local_sockaddr);550550+ nes_create_sockaddr(htonl(cm_info->mapped_loc_addr),551551+ htons(cm_info->mapped_loc_port), &mapped_sockaddr);552552+553553+ return iwpm_create_mapinfo(&local_sockaddr,554554+ &mapped_sockaddr, RDMA_NL_NES);555555+}556556+557557+/*558558+ * nes_remove_mapinfo - Remove a mapinfo object from the port mapper data base559559+ * and send a remove mapping op message to560560+ * the userspace port mapper561561+ */562562+static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port,563563+ u32 mapped_loc_addr, u16 mapped_loc_port)564564+{565565+ struct sockaddr_storage local_sockaddr;566566+ struct sockaddr_storage mapped_sockaddr;567567+568568+ nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr);569569+ nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port),570570+ &mapped_sockaddr);571571+572572+ iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr);573573+ return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES);574574+}575575+576576+/*577577+ * nes_form_pm_msg - Form a port mapper message with mapping info578578+ */579579+static void nes_form_pm_msg(struct nes_cm_info *cm_info,580580+ struct iwpm_sa_data *pm_msg)581581+{582582+ nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),583583+ &pm_msg->loc_addr);584584+ nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port),585585+ &pm_msg->rem_addr);586586+}587587+588588+/*589589+ * nes_form_reg_msg - Form a port mapper message with dev info590590+ */591591+static void nes_form_reg_msg(struct nes_vnic *nesvnic,592592+ struct iwpm_dev_data *pm_msg)593593+{594594+ memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name,595595+ IWPM_DEVNAME_SIZE);596596+ memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);597597+}598598+599599+/*600600+ * nes_record_pm_msg - Save the received mapping info601601+ */602602+static void nes_record_pm_msg(struct nes_cm_info *cm_info,603603+ struct iwpm_sa_data 
*pm_msg)604604+{605605+ struct sockaddr_in *mapped_loc_addr =606606+ (struct sockaddr_in *)&pm_msg->mapped_loc_addr;607607+ struct sockaddr_in *mapped_rem_addr =608608+ (struct sockaddr_in *)&pm_msg->mapped_rem_addr;609609+610610+ if (mapped_loc_addr->sin_family == AF_INET) {611611+ cm_info->mapped_loc_addr =612612+ ntohl(mapped_loc_addr->sin_addr.s_addr);613613+ cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);614614+ }615615+ if (mapped_rem_addr->sin_family == AF_INET) {616616+ cm_info->mapped_rem_addr =617617+ ntohl(mapped_rem_addr->sin_addr.s_addr);618618+ cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);619619+ }526620}527621528622/**···12411147 loc_addr, loc_port,12421148 cm_node->rem_addr, cm_node->rem_port,12431149 rem_addr, rem_port);12441244- if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&12451245- (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {11501150+ if ((cm_node->mapped_loc_addr == loc_addr) &&11511151+ (cm_node->mapped_loc_port == loc_port) &&11521152+ (cm_node->mapped_rem_addr == rem_addr) &&11531153+ (cm_node->mapped_rem_port == rem_port)) {11541154+12461155 add_ref_cm_node(cm_node);12471156 spin_unlock_irqrestore(&cm_core->ht_lock, flags);12481157 return cm_node;···12621165 * find_listener - find a cm node listening on this addr-port pair12631166 */12641167static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,12651265- nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)11681168+ nes_addr_t dst_addr, u16 dst_port,11691169+ enum nes_cm_listener_state listener_state, int local)12661170{12671171 unsigned long flags;12681172 struct nes_cm_listener *listen_node;11731173+ nes_addr_t listen_addr;11741174+ u16 listen_port;1269117512701176 /* walk list and find cm_node associated with this session ID */12711177 spin_lock_irqsave(&cm_core->listen_list_lock, flags);12721178 list_for_each_entry(listen_node, &cm_core->listen_list.list, list) 
{11791179+ if (local) {11801180+ listen_addr = listen_node->loc_addr;11811181+ listen_port = listen_node->loc_port;11821182+ } else {11831183+ listen_addr = listen_node->mapped_loc_addr;11841184+ listen_port = listen_node->mapped_loc_port;11851185+ }12731186 /* compare node pair, return node handle if a match */12741274- if (((listen_node->loc_addr == dst_addr) ||12751275- listen_node->loc_addr == 0x00000000) &&12761276- (listen_node->loc_port == dst_port) &&11871187+ if (((listen_addr == dst_addr) ||11881188+ listen_addr == 0x00000000) &&11891189+ (listen_port == dst_port) &&12771190 (listener_state & listen_node->listener_state)) {12781191 atomic_inc(&listen_node->ref_count);12791192 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);···12951188 /* no listener */12961189 return NULL;12971190}12981298-1299119113001192/**13011193 * add_hte_node - add a cm node to the hash table···1416131014171311 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);1418131214191419- if (listener->nesvnic)14201420- nes_manage_apbvt(listener->nesvnic, listener->loc_port,14211421- PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);13131313+ if (listener->nesvnic) {13141314+ nes_manage_apbvt(listener->nesvnic,13151315+ listener->mapped_loc_port,13161316+ PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),13171317+ NES_MANAGE_APBVT_DEL);13181318+13191319+ nes_remove_mapinfo(listener->loc_addr,13201320+ listener->loc_port,13211321+ listener->mapped_loc_addr,13221322+ listener->mapped_loc_port);13231323+ nes_debug(NES_DBG_NLMSG,13241324+ "Delete APBVT mapped_loc_port = %04X\n",13251325+ listener->mapped_loc_port);13261326+ }1422132714231328 nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);14241329···15711454 cm_node->loc_port = cm_info->loc_port;15721455 cm_node->rem_port = cm_info->rem_port;1573145614571457+ cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;14581458+ cm_node->mapped_rem_addr = cm_info->mapped_rem_addr;14591459+ 
cm_node->mapped_loc_port = cm_info->mapped_loc_port;14601460+ cm_node->mapped_rem_port = cm_info->mapped_rem_port;14611461+15741462 cm_node->mpa_frame_rev = mpa_version;15751463 cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;15761464 cm_node->mpav2_ird_ord = 0;···16221500 cm_node->loopbackpartner = NULL;1623150116241502 /* get the mac addr for the remote node */16251625- oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);16261626- arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);15031503+ oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr,15041504+ NULL, NES_ARP_RESOLVE);15051505+ arpindex = nes_addr_resolve_neigh(nesvnic,15061506+ cm_node->mapped_rem_addr, oldarpindex);16271507 if (arpindex < 0) {16281508 kfree(cm_node);16291509 return NULL;···16871563 mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);16881564 } else {16891565 if (cm_node->apbvt_set && cm_node->nesvnic) {16901690- nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,16911691- PCI_FUNC(16921692- cm_node->nesvnic->nesdev->pcidev->devfn),15661566+ nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port,15671567+ PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),16931568 NES_MANAGE_APBVT_DEL);16941569 }15701570+ nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n",15711571+ cm_node->mapped_loc_port);15721572+ nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port,15731573+ cm_node->mapped_loc_addr, cm_node->mapped_loc_port);16951574 }1696157516971576 atomic_dec(&cm_core->node_cnt);···23622235 * mini_cm_listen - create a listen node with params23632236 */23642237static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,23652365- struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)22382238+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)23662239{23672240 struct nes_cm_listener *listener;22412241+ struct iwpm_dev_data pm_reg_msg;22422242+ struct iwpm_sa_data pm_msg;23682243 unsigned long 
flags;22442244+ int iwpm_err = 0;2369224523702246 nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",23712247 cm_info->loc_addr, cm_info->loc_port);2372224823732249 /* cannot have multiple matching listeners */23742374- listener = find_listener(cm_core, htonl(cm_info->loc_addr),23752375- htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);22502250+ listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,22512251+ NES_CM_LISTENER_EITHER_STATE, 1);22522252+23762253 if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {23772254 /* find automatically incs ref count ??? */23782255 atomic_dec(&listener->ref_count);···23852254 }2386225523872256 if (!listener) {22572257+ nes_form_reg_msg(nesvnic, &pm_reg_msg);22582258+ iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);22592259+ if (iwpm_err) {22602260+ nes_debug(NES_DBG_NLMSG,22612261+ "Port Mapper reg pid fail (err = %d).\n", iwpm_err);22622262+ }22632263+ if (iwpm_valid_pid() && !iwpm_err) {22642264+ nes_form_pm_msg(cm_info, &pm_msg);22652265+ iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES);22662266+ if (iwpm_err)22672267+ nes_debug(NES_DBG_NLMSG,22682268+ "Port Mapper query fail (err = %d).\n", iwpm_err);22692269+ else22702270+ nes_record_pm_msg(cm_info, &pm_msg);22712271+ }22722272+23882273 /* create a CM listen node (1/2 node to compare incoming traffic to) */23892274 listener = kzalloc(sizeof(*listener), GFP_ATOMIC);23902275 if (!listener) {···24082261 return NULL;24092262 }2410226324112411- listener->loc_addr = htonl(cm_info->loc_addr);24122412- listener->loc_port = htons(cm_info->loc_port);22642264+ listener->loc_addr = cm_info->loc_addr;22652265+ listener->loc_port = cm_info->loc_port;22662266+ listener->mapped_loc_addr = cm_info->mapped_loc_addr;22672267+ listener->mapped_loc_port = cm_info->mapped_loc_port;24132268 listener->reused_node = 0;2414226924152270 atomic_set(&listener->ref_count, 1);···2473232424742325 if (cm_info->loc_addr == cm_info->rem_addr) 
{24752326 loopbackremotelistener = find_listener(cm_core,24762476- ntohl(nesvnic->local_ipaddr), cm_node->rem_port,24772477- NES_CM_LISTENER_ACTIVE_STATE);23272327+ cm_node->mapped_loc_addr, cm_node->mapped_rem_port,23282328+ NES_CM_LISTENER_ACTIVE_STATE, 0);24782329 if (loopbackremotelistener == NULL) {24792330 create_event(cm_node, NES_CM_EVENT_ABORTED);24802331 } else {24812332 loopback_cm_info = *cm_info;24822333 loopback_cm_info.loc_port = cm_info->rem_port;24832334 loopback_cm_info.rem_port = cm_info->loc_port;23352335+ loopback_cm_info.mapped_loc_port =23362336+ cm_info->mapped_rem_port;23372337+ loopback_cm_info.mapped_rem_port =23382338+ cm_info->mapped_loc_port;24842339 loopback_cm_info.cm_id = loopbackremotelistener->cm_id;24852340 loopbackremotenode = make_cm_node(cm_core, nesvnic,24862341 &loopback_cm_info, loopbackremotelistener);···27132560 nfo.rem_addr = ntohl(iph->saddr);27142561 nfo.rem_port = ntohs(tcph->source);2715256225632563+ /* If port mapper is available these should be mapped address info */25642564+ nfo.mapped_loc_addr = ntohl(iph->daddr);25652565+ nfo.mapped_loc_port = ntohs(tcph->dest);25662566+ nfo.mapped_rem_addr = ntohl(iph->saddr);25672567+ nfo.mapped_rem_port = ntohs(tcph->source);25682568+27162569 tmp_daddr = cpu_to_be32(iph->daddr);27172570 tmp_saddr = cpu_to_be32(iph->saddr);27182571···2727256827282569 do {27292570 cm_node = find_node(cm_core,27302730- nfo.rem_port, nfo.rem_addr,27312731- nfo.loc_port, nfo.loc_addr);25712571+ nfo.mapped_rem_port, nfo.mapped_rem_addr,25722572+ nfo.mapped_loc_port, nfo.mapped_loc_addr);2732257327332574 if (!cm_node) {27342575 /* Only type of packet accepted are for */···27372578 skb_handled = 0;27382579 break;27392580 }27402740- listener = find_listener(cm_core, nfo.loc_addr,27412741- nfo.loc_port,27422742- NES_CM_LISTENER_ACTIVE_STATE);25812581+ listener = find_listener(cm_core, nfo.mapped_loc_addr,25822582+ nfo.mapped_loc_port,25832583+ NES_CM_LISTENER_ACTIVE_STATE, 0);27432584 if (!listener) 
{27442585 nfo.cm_id = NULL;27452586 nfo.conn_type = 0;···3343318433443185 nes_cm_init_tsa_conn(nesqp, cm_node);3345318633463346- nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));33473347- nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));31873187+ nesqp->nesqp_context->tcpPorts[0] =31883188+ cpu_to_le16(cm_node->mapped_loc_port);31893189+ nesqp->nesqp_context->tcpPorts[1] =31903190+ cpu_to_le16(cm_node->mapped_rem_port);3348319133493349- nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));31923192+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);3350319333513194 nesqp->nesqp_context->misc2 |= cpu_to_le32(33523195 (u32)PCI_FUNC(nesdev->pcidev->devfn) <<···33723211 memset(&nes_quad, 0, sizeof(nes_quad));33733212 nes_quad.DstIpAdrIndex =33743213 cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);33753375- nes_quad.SrcIpadr = raddr->sin_addr.s_addr;33763376- nes_quad.TcpPorts[0] = raddr->sin_port;33773377- nes_quad.TcpPorts[1] = laddr->sin_port;32143214+ nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);32153215+ nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);32163216+ nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);3378321733793218 /* Produce hash key */33803219 crc_value = get_crc_value(&nes_quad);···34763315 int apbvt_set = 0;34773316 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;34783317 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;33183318+ struct iwpm_dev_data pm_reg_msg;33193319+ struct iwpm_sa_data pm_msg;33203320+ int iwpm_err = 0;3479332134803322 if (cm_id->remote_addr.ss_family != AF_INET)34813323 return -ENOSYS;···35163352 nes_debug(NES_DBG_CM, "mpa private data len =%u\n",35173353 conn_param->private_data_len);3518335433553355+ /* set up the connection params for the node */33563356+ cm_info.loc_addr = ntohl(laddr->sin_addr.s_addr);33573357+ cm_info.loc_port = ntohs(laddr->sin_port);33583358+ 
cm_info.rem_addr = ntohl(raddr->sin_addr.s_addr);33593359+ cm_info.rem_port = ntohs(raddr->sin_port);33603360+ cm_info.cm_id = cm_id;33613361+ cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;33623362+33633363+ /* No port mapper available, go with the specified peer information */33643364+ cm_info.mapped_loc_addr = cm_info.loc_addr;33653365+ cm_info.mapped_loc_port = cm_info.loc_port;33663366+ cm_info.mapped_rem_addr = cm_info.rem_addr;33673367+ cm_info.mapped_rem_port = cm_info.rem_port;33683368+33693369+ nes_form_reg_msg(nesvnic, &pm_reg_msg);33703370+ iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);33713371+ if (iwpm_err) {33723372+ nes_debug(NES_DBG_NLMSG,33733373+ "Port Mapper reg pid fail (err = %d).\n", iwpm_err);33743374+ }33753375+ if (iwpm_valid_pid() && !iwpm_err) {33763376+ nes_form_pm_msg(&cm_info, &pm_msg);33773377+ iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES);33783378+ if (iwpm_err)33793379+ nes_debug(NES_DBG_NLMSG,33803380+ "Port Mapper query fail (err = %d).\n", iwpm_err);33813381+ else33823382+ nes_record_pm_msg(&cm_info, &pm_msg);33833383+ }33843384+35193385 if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {35203520- nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),35213521- PCI_FUNC(nesdev->pcidev->devfn),35223522- NES_MANAGE_APBVT_ADD);33863386+ nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,33873387+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);35233388 apbvt_set = 1;35243389 }3525339035263526- /* set up the connection params for the node */35273527- cm_info.loc_addr = htonl(laddr->sin_addr.s_addr);35283528- cm_info.loc_port = htons(laddr->sin_port);35293529- cm_info.rem_addr = htonl(raddr->sin_addr.s_addr);35303530- cm_info.rem_port = htons(raddr->sin_port);35313531- cm_info.cm_id = cm_id;35323532- cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;33913391+ if (nes_create_mapinfo(&cm_info))33923392+ return -ENOMEM;3533339335343394 cm_id->add_ref(cm_id);35353395···35633375 &cm_info);35643376 if (!cm_node) {35653377 
if (apbvt_set)35663566- nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),33783378+ nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,35673379 PCI_FUNC(nesdev->pcidev->devfn),35683380 NES_MANAGE_APBVT_DEL);3569338133823382+ nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n",33833383+ cm_info.mapped_loc_port);33843384+ nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port,33853385+ cm_info.mapped_loc_addr, cm_info.mapped_loc_port);35703386 cm_id->rem_ref(cm_id);35713387 return -ENOMEM;35723388 }···36163424 nesvnic->local_ipaddr, laddr->sin_addr.s_addr);3617342536183426 /* setup listen params in our api call struct */36193619- cm_info.loc_addr = nesvnic->local_ipaddr;36203620- cm_info.loc_port = laddr->sin_port;34273427+ cm_info.loc_addr = ntohl(nesvnic->local_ipaddr);34283428+ cm_info.loc_port = ntohs(laddr->sin_port);36213429 cm_info.backlog = backlog;36223430 cm_info.cm_id = cm_id;3623343136243432 cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;3625343334343434+ /* No port mapper available, go with the specified info */34353435+ cm_info.mapped_loc_addr = cm_info.loc_addr;34363436+ cm_info.mapped_loc_port = cm_info.loc_port;3626343736273438 cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);36283439 if (!cm_node) {···36373442 cm_id->provider_data = cm_node;3638344336393444 if (!cm_node->reused_node) {36403640- err = nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),34453445+ if (nes_create_mapinfo(&cm_info))34463446+ return -ENOMEM;34473447+34483448+ err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port,36413449 PCI_FUNC(nesvnic->nesdev->pcidev->devfn),36423450 NES_MANAGE_APBVT_ADD);36433451 if (err) {···37653567 nes_cm_init_tsa_conn(nesqp, cm_node);3766356837673569 /* set the QP tsa context */37683768- nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));37693769- nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));37703770- nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));35703570+ 
nesqp->nesqp_context->tcpPorts[0] =35713571+ cpu_to_le16(cm_node->mapped_loc_port);35723572+ nesqp->nesqp_context->tcpPorts[1] =35733573+ cpu_to_le16(cm_node->mapped_rem_port);35743574+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);3771357537723576 nesqp->nesqp_context->misc2 |= cpu_to_le32(37733577 (u32)PCI_FUNC(nesdev->pcidev->devfn) <<···3799359938003600 nes_quad.DstIpAdrIndex =38013601 cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);38023802- nes_quad.SrcIpadr = raddr->sin_addr.s_addr;38033803- nes_quad.TcpPorts[0] = raddr->sin_port;38043804- nes_quad.TcpPorts[1] = laddr->sin_port;36023602+ nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);36033603+ nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);36043604+ nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);3805360538063606 /* Produce hash key */38073607 crc_value = get_crc_value(&nes_quad);···38293629 cm_event.ird = cm_node->ird_size;38303630 cm_event.ord = cm_node->ord_size;3831363138323832- cm_event_laddr->sin_addr.s_addr = event->cm_info.rem_addr;36323632+ cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);38333633 ret = cm_id->event_handler(cm_id, &cm_event);38343634 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);38353635
+9-3
drivers/infiniband/hw/nes/nes_cm.h
···11/*22- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.22+ * Copyright (c) 2006 - 2014 Intel Corporation. All rights reserved.33 *44 * This software is available to you under a choice of one of two55 * licenses. You may choose to be licensed under the terms of the GNU···293293 struct list_head list;294294 struct nes_cm_core *cm_core;295295 u8 loc_mac[ETH_ALEN];296296- nes_addr_t loc_addr;297297- u16 loc_port;296296+ nes_addr_t loc_addr, mapped_loc_addr;297297+ u16 loc_port, mapped_loc_port;298298 struct iw_cm_id *cm_id;299299 enum nes_cm_conn_type conn_type;300300 atomic_t ref_count;···308308/* per connection node and node state information */309309struct nes_cm_node {310310 nes_addr_t loc_addr, rem_addr;311311+ nes_addr_t mapped_loc_addr, mapped_rem_addr;311312 u16 loc_port, rem_port;313313+ u16 mapped_loc_port, mapped_rem_port;312314313315 u8 loc_mac[ETH_ALEN];314316 u8 rem_mac[ETH_ALEN];···366364 u16 rem_port;367365 nes_addr_t loc_addr;368366 nes_addr_t rem_addr;367367+ u16 mapped_loc_port;368368+ u16 mapped_rem_port;369369+ nes_addr_t mapped_loc_addr;370370+ nes_addr_t mapped_rem_addr;369371370372 enum nes_cm_conn_type conn_type;371373 int backlog;
···12721272 * Do all the generic driver unit- and chip-independent memory12731273 * allocation and initialization.12741274 */12751275-static int __init qlogic_ib_init(void)12751275+static int __init qib_ib_init(void)12761276{12771277 int ret;12781278···13161316 return ret;13171317}1318131813191319-module_init(qlogic_ib_init);13191319+module_init(qib_ib_init);1320132013211321/*13221322 * Do the non-unit driver cleanup, memory free, etc. at unload.13231323 */13241324-static void __exit qlogic_ib_cleanup(void)13241324+static void __exit qib_ib_cleanup(void)13251325{13261326 int ret;13271327···13461346 qib_dev_cleanup();13471347}1348134813491349-module_exit(qlogic_ib_cleanup);13491349+module_exit(qib_ib_cleanup);1350135013511351/* this can only be called after a successful initialization */13521352static void cleanup_device_data(struct qib_devdata *dd)
···11+/*22+ * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.33+ *44+ * This program is free software; you may redistribute it and/or modify55+ * it under the terms of the GNU General Public License as published by66+ * the Free Software Foundation; version 2 of the License.77+ *88+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,99+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF1010+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND1111+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS1212+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN1313+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN1414+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE1515+ * SOFTWARE.1616+ *1717+ */1818+119#include <linux/init.h>220#include <linux/list.h>321#include <linux/slab.h>
···581581 return ret;582582}583583584584+void iser_release_work(struct work_struct *work)585585+{586586+ struct iser_conn *ib_conn;587587+588588+ ib_conn = container_of(work, struct iser_conn, release_work);589589+590590+ /* wait for .conn_stop callback */591591+ wait_for_completion(&ib_conn->stop_completion);592592+593593+ /* wait for the qp`s post send and post receive buffers to empty */594594+ wait_event_interruptible(ib_conn->wait,595595+ ib_conn->state == ISER_CONN_DOWN);596596+597597+ iser_conn_release(ib_conn);598598+}599599+584600/**585601 * Frees all conn objects and deallocs conn descriptor586602 */587587-static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)603603+void iser_conn_release(struct iser_conn *ib_conn)588604{589605 struct iser_device *device = ib_conn->device;590606591591- BUG_ON(ib_conn->state != ISER_CONN_DOWN);607607+ BUG_ON(ib_conn->state == ISER_CONN_UP);592608593609 mutex_lock(&ig.connlist_mutex);594610 list_del(&ib_conn->conn_list);···616600 if (device != NULL)617601 iser_device_try_release(device);618602 /* if cma handler context, the caller actually destroy the id */619619- if (ib_conn->cma_id != NULL && can_destroy_id) {603603+ if (ib_conn->cma_id != NULL) {620604 rdma_destroy_id(ib_conn->cma_id);621605 ib_conn->cma_id = NULL;622606 }623607 iscsi_destroy_endpoint(ib_conn->ep);624624-}625625-626626-void iser_conn_get(struct iser_conn *ib_conn)627627-{628628- atomic_inc(&ib_conn->refcount);629629-}630630-631631-int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)632632-{633633- if (atomic_dec_and_test(&ib_conn->refcount)) {634634- iser_conn_release(ib_conn, can_destroy_id);635635- return 1;636636- }637637- return 0;638608}639609640610/**···640638 if (err)641639 iser_err("Failed to disconnect, conn: 0x%p err %d\n",642640 ib_conn,err);643643-644644- wait_event_interruptible(ib_conn->wait,645645- ib_conn->state == ISER_CONN_DOWN);646646-647647- iser_conn_put(ib_conn, 1); /* deref ib conn deallocate 
*/648641}649642650650-static int iser_connect_error(struct rdma_cm_id *cma_id)643643+static void iser_connect_error(struct rdma_cm_id *cma_id)651644{652645 struct iser_conn *ib_conn;646646+653647 ib_conn = (struct iser_conn *)cma_id->context;654648655649 ib_conn->state = ISER_CONN_DOWN;656650 wake_up_interruptible(&ib_conn->wait);657657- return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */658651}659652660660-static int iser_addr_handler(struct rdma_cm_id *cma_id)653653+static void iser_addr_handler(struct rdma_cm_id *cma_id)661654{662655 struct iser_device *device;663656 struct iser_conn *ib_conn;···661664 device = iser_device_find_by_ib_device(cma_id);662665 if (!device) {663666 iser_err("device lookup/creation failed\n");664664- return iser_connect_error(cma_id);667667+ iser_connect_error(cma_id);668668+ return;665669 }666670667671 ib_conn = (struct iser_conn *)cma_id->context;···684686 ret = rdma_resolve_route(cma_id, 1000);685687 if (ret) {686688 iser_err("resolve route failed: %d\n", ret);687687- return iser_connect_error(cma_id);689689+ iser_connect_error(cma_id);690690+ return;688691 }689689-690690- return 0;691692}692693693693-static int iser_route_handler(struct rdma_cm_id *cma_id)694694+static void iser_route_handler(struct rdma_cm_id *cma_id)694695{695696 struct rdma_conn_param conn_param;696697 int ret;···717720 goto failure;718721 }719722720720- return 0;723723+ return;721724failure:722722- return iser_connect_error(cma_id);725725+ iser_connect_error(cma_id);723726}724727725728static void iser_connected_handler(struct rdma_cm_id *cma_id)···732735 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);733736734737 ib_conn = (struct iser_conn *)cma_id->context;735735- ib_conn->state = ISER_CONN_UP;736736- wake_up_interruptible(&ib_conn->wait);738738+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP))739739+ wake_up_interruptible(&ib_conn->wait);737740}738741739739-static int 
iser_disconnected_handler(struct rdma_cm_id *cma_id)742742+static void iser_disconnected_handler(struct rdma_cm_id *cma_id)740743{741744 struct iser_conn *ib_conn;742742- int ret;743745744746 ib_conn = (struct iser_conn *)cma_id->context;745747···758762 ib_conn->state = ISER_CONN_DOWN;759763 wake_up_interruptible(&ib_conn->wait);760764 }761761-762762- ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */763763- return ret;764765}765766766767static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)767768{768768- int ret = 0;769769-770769 iser_info("event %d status %d conn %p id %p\n",771770 event->event, event->status, cma_id->context, cma_id);772771773772 switch (event->event) {774773 case RDMA_CM_EVENT_ADDR_RESOLVED:775775- ret = iser_addr_handler(cma_id);774774+ iser_addr_handler(cma_id);776775 break;777776 case RDMA_CM_EVENT_ROUTE_RESOLVED:778778- ret = iser_route_handler(cma_id);777777+ iser_route_handler(cma_id);779778 break;780779 case RDMA_CM_EVENT_ESTABLISHED:781780 iser_connected_handler(cma_id);···780789 case RDMA_CM_EVENT_CONNECT_ERROR:781790 case RDMA_CM_EVENT_UNREACHABLE:782791 case RDMA_CM_EVENT_REJECTED:783783- ret = iser_connect_error(cma_id);792792+ iser_connect_error(cma_id);784793 break;785794 case RDMA_CM_EVENT_DISCONNECTED:786795 case RDMA_CM_EVENT_DEVICE_REMOVAL:787796 case RDMA_CM_EVENT_ADDR_CHANGE:788788- ret = iser_disconnected_handler(cma_id);797797+ iser_disconnected_handler(cma_id);789798 break;790799 default:791800 iser_err("Unexpected RDMA CM event (%d)\n", event->event);792801 break;793802 }794794- return ret;803803+ return 0;795804}796805797806void iser_conn_init(struct iser_conn *ib_conn)···800809 init_waitqueue_head(&ib_conn->wait);801810 ib_conn->post_recv_buf_count = 0;802811 atomic_set(&ib_conn->post_send_buf_count, 0);803803- atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */812812+ init_completion(&ib_conn->stop_completion);804813 INIT_LIST_HEAD(&ib_conn->conn_list);805814 
spin_lock_init(&ib_conn->lock);806815}···828837829838 ib_conn->state = ISER_CONN_PENDING;830839831831- iser_conn_get(ib_conn); /* ref ib conn's cma id */832840 ib_conn->cma_id = rdma_create_id(iser_cma_handler,833841 (void *)ib_conn,834842 RDMA_PS_TCP, IB_QPT_RC);···864874 ib_conn->cma_id = NULL;865875addr_failure:866876 ib_conn->state = ISER_CONN_DOWN;867867- iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */868877connect_failure:869869- iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */878878+ iser_conn_release(ib_conn);870879 return err;871880}872881
+512-160
drivers/infiniband/ulp/srp/ib_srp.c
···3030 * SOFTWARE.3131 */32323333-#define pr_fmt(fmt) PFX fmt3333+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt34343535#include <linux/module.h>3636#include <linux/init.h>···6666static unsigned int cmd_sg_entries;6767static unsigned int indirect_sg_entries;6868static bool allow_ext_sg;6969+static bool prefer_fr;7070+static bool register_always;6971static int topspin_workarounds = 1;70727173module_param(srp_sg_tablesize, uint, 0444);···8886module_param(topspin_workarounds, int, 0444);8987MODULE_PARM_DESC(topspin_workarounds,9088 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");8989+9090+module_param(prefer_fr, bool, 0444);9191+MODULE_PARM_DESC(prefer_fr,9292+"Whether to use fast registration if both FMR and fast registration are supported");9393+9494+module_param(register_always, bool, 0444);9595+MODULE_PARM_DESC(register_always,9696+ "Use memory registration even for contiguous memory regions");91979298static struct kernel_param_ops srp_tmo_ops;9399···298288 return 0;299289}300290291291+static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)292292+{293293+ struct srp_device *dev = target->srp_host->srp_dev;294294+ struct ib_fmr_pool_param fmr_param;295295+296296+ memset(&fmr_param, 0, sizeof(fmr_param));297297+ fmr_param.pool_size = target->scsi_host->can_queue;298298+ fmr_param.dirty_watermark = fmr_param.pool_size / 4;299299+ fmr_param.cache = 1;300300+ fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;301301+ fmr_param.page_shift = ilog2(dev->mr_page_size);302302+ fmr_param.access = (IB_ACCESS_LOCAL_WRITE |303303+ IB_ACCESS_REMOTE_WRITE |304304+ IB_ACCESS_REMOTE_READ);305305+306306+ return ib_create_fmr_pool(dev->pd, &fmr_param);307307+}308308+309309+/**310310+ * srp_destroy_fr_pool() - free the resources owned by a pool311311+ * @pool: Fast registration pool to be destroyed.312312+ */313313+static void srp_destroy_fr_pool(struct srp_fr_pool *pool)314314+{315315+ int i;316316+ struct srp_fr_desc *d;317317+318318+ if 
(!pool)319319+ return;320320+321321+ for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {322322+ if (d->frpl)323323+ ib_free_fast_reg_page_list(d->frpl);324324+ if (d->mr)325325+ ib_dereg_mr(d->mr);326326+ }327327+ kfree(pool);328328+}329329+330330+/**331331+ * srp_create_fr_pool() - allocate and initialize a pool for fast registration332332+ * @device: IB device to allocate fast registration descriptors for.333333+ * @pd: Protection domain associated with the FR descriptors.334334+ * @pool_size: Number of descriptors to allocate.335335+ * @max_page_list_len: Maximum fast registration work request page list length.336336+ */337337+static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,338338+ struct ib_pd *pd, int pool_size,339339+ int max_page_list_len)340340+{341341+ struct srp_fr_pool *pool;342342+ struct srp_fr_desc *d;343343+ struct ib_mr *mr;344344+ struct ib_fast_reg_page_list *frpl;345345+ int i, ret = -EINVAL;346346+347347+ if (pool_size <= 0)348348+ goto err;349349+ ret = -ENOMEM;350350+ pool = kzalloc(sizeof(struct srp_fr_pool) +351351+ pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);352352+ if (!pool)353353+ goto err;354354+ pool->size = pool_size;355355+ pool->max_page_list_len = max_page_list_len;356356+ spin_lock_init(&pool->lock);357357+ INIT_LIST_HEAD(&pool->free_list);358358+359359+ for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {360360+ mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);361361+ if (IS_ERR(mr)) {362362+ ret = PTR_ERR(mr);363363+ goto destroy_pool;364364+ }365365+ d->mr = mr;366366+ frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);367367+ if (IS_ERR(frpl)) {368368+ ret = PTR_ERR(frpl);369369+ goto destroy_pool;370370+ }371371+ d->frpl = frpl;372372+ list_add_tail(&d->entry, &pool->free_list);373373+ }374374+375375+out:376376+ return pool;377377+378378+destroy_pool:379379+ srp_destroy_fr_pool(pool);380380+381381+err:382382+ pool = ERR_PTR(ret);383383+ goto 
out;384384+}385385+386386+/**387387+ * srp_fr_pool_get() - obtain a descriptor suitable for fast registration388388+ * @pool: Pool to obtain descriptor from.389389+ */390390+static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)391391+{392392+ struct srp_fr_desc *d = NULL;393393+ unsigned long flags;394394+395395+ spin_lock_irqsave(&pool->lock, flags);396396+ if (!list_empty(&pool->free_list)) {397397+ d = list_first_entry(&pool->free_list, typeof(*d), entry);398398+ list_del(&d->entry);399399+ }400400+ spin_unlock_irqrestore(&pool->lock, flags);401401+402402+ return d;403403+}404404+405405+/**406406+ * srp_fr_pool_put() - put an FR descriptor back in the free list407407+ * @pool: Pool the descriptor was allocated from.408408+ * @desc: Pointer to an array of fast registration descriptor pointers.409409+ * @n: Number of descriptors to put back.410410+ *411411+ * Note: The caller must already have queued an invalidation request for412412+ * desc->mr->rkey before calling this function.413413+ */414414+static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,415415+ int n)416416+{417417+ unsigned long flags;418418+ int i;419419+420420+ spin_lock_irqsave(&pool->lock, flags);421421+ for (i = 0; i < n; i++)422422+ list_add(&desc[i]->entry, &pool->free_list);423423+ spin_unlock_irqrestore(&pool->lock, flags);424424+}425425+426426+static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)427427+{428428+ struct srp_device *dev = target->srp_host->srp_dev;429429+430430+ return srp_create_fr_pool(dev->dev, dev->pd,431431+ target->scsi_host->can_queue,432432+ dev->max_pages_per_mr);433433+}434434+301435static int srp_create_target_ib(struct srp_target_port *target)302436{437437+ struct srp_device *dev = target->srp_host->srp_dev;303438 struct ib_qp_init_attr *init_attr;304439 struct ib_cq *recv_cq, *send_cq;305440 struct ib_qp *qp;441441+ struct ib_fmr_pool *fmr_pool = NULL;442442+ struct srp_fr_pool *fr_pool = NULL;443443+ 
const int m = 1 + dev->use_fast_reg;306444 int ret;307445308446 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);309447 if (!init_attr)310448 return -ENOMEM;311449312312- recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,313313- srp_recv_completion, NULL, target,450450+ recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target,314451 target->queue_size, target->comp_vector);315452 if (IS_ERR(recv_cq)) {316453 ret = PTR_ERR(recv_cq);317454 goto err;318455 }319456320320- send_cq = ib_create_cq(target->srp_host->srp_dev->dev,321321- srp_send_completion, NULL, target,322322- target->queue_size, target->comp_vector);457457+ send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target,458458+ m * target->queue_size, target->comp_vector);323459 if (IS_ERR(send_cq)) {324460 ret = PTR_ERR(send_cq);325461 goto err_recv_cq;···474318 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);475319476320 init_attr->event_handler = srp_qp_event;477477- init_attr->cap.max_send_wr = target->queue_size;321321+ init_attr->cap.max_send_wr = m * target->queue_size;478322 init_attr->cap.max_recv_wr = target->queue_size;479323 init_attr->cap.max_recv_sge = 1;480324 init_attr->cap.max_send_sge = 1;481481- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;325325+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;482326 init_attr->qp_type = IB_QPT_RC;483327 init_attr->send_cq = send_cq;484328 init_attr->recv_cq = recv_cq;485329486486- qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);330330+ qp = ib_create_qp(dev->pd, init_attr);487331 if (IS_ERR(qp)) {488332 ret = PTR_ERR(qp);489333 goto err_send_cq;···492336 ret = srp_init_qp(target, qp);493337 if (ret)494338 goto err_qp;339339+340340+ if (dev->use_fast_reg && dev->has_fr) {341341+ fr_pool = srp_alloc_fr_pool(target);342342+ if (IS_ERR(fr_pool)) {343343+ ret = PTR_ERR(fr_pool);344344+ shost_printk(KERN_WARNING, target->scsi_host, PFX345345+ "FR pool allocation failed (%d)\n", ret);346346+ goto err_qp;347347+ }348348+ if 
(target->fr_pool)349349+ srp_destroy_fr_pool(target->fr_pool);350350+ target->fr_pool = fr_pool;351351+ } else if (!dev->use_fast_reg && dev->has_fmr) {352352+ fmr_pool = srp_alloc_fmr_pool(target);353353+ if (IS_ERR(fmr_pool)) {354354+ ret = PTR_ERR(fmr_pool);355355+ shost_printk(KERN_WARNING, target->scsi_host, PFX356356+ "FMR pool allocation failed (%d)\n", ret);357357+ goto err_qp;358358+ }359359+ if (target->fmr_pool)360360+ ib_destroy_fmr_pool(target->fmr_pool);361361+ target->fmr_pool = fmr_pool;362362+ }495363496364 if (target->qp)497365 ib_destroy_qp(target->qp);···551371 */552372static void srp_free_target_ib(struct srp_target_port *target)553373{374374+ struct srp_device *dev = target->srp_host->srp_dev;554375 int i;555376377377+ if (dev->use_fast_reg) {378378+ if (target->fr_pool)379379+ srp_destroy_fr_pool(target->fr_pool);380380+ } else {381381+ if (target->fmr_pool)382382+ ib_destroy_fmr_pool(target->fmr_pool);383383+ }556384 ib_destroy_qp(target->qp);557385 ib_destroy_cq(target->send_cq);558386 ib_destroy_cq(target->recv_cq);···765577766578static void srp_free_req_data(struct srp_target_port *target)767579{768768- struct ib_device *ibdev = target->srp_host->srp_dev->dev;580580+ struct srp_device *dev = target->srp_host->srp_dev;581581+ struct ib_device *ibdev = dev->dev;769582 struct srp_request *req;770583 int i;771584···775586776587 for (i = 0; i < target->req_ring_size; ++i) {777588 req = &target->req_ring[i];778778- kfree(req->fmr_list);589589+ if (dev->use_fast_reg)590590+ kfree(req->fr_list);591591+ else592592+ kfree(req->fmr_list);779593 kfree(req->map_page);780594 if (req->indirect_dma_addr) {781595 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,···797605 struct srp_device *srp_dev = target->srp_host->srp_dev;798606 struct ib_device *ibdev = srp_dev->dev;799607 struct srp_request *req;608608+ void *mr_list;800609 dma_addr_t dma_addr;801610 int i, ret = -ENOMEM;802611···810617811618 for (i = 0; i < target->req_ring_size; ++i) {812619 req = 
&target->req_ring[i];813813- req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),814814- GFP_KERNEL);815815- req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),816816- GFP_KERNEL);620620+ mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),621621+ GFP_KERNEL);622622+ if (!mr_list)623623+ goto out;624624+ if (srp_dev->use_fast_reg)625625+ req->fr_list = mr_list;626626+ else627627+ req->fmr_list = mr_list;628628+ req->map_page = kmalloc(srp_dev->max_pages_per_mr *629629+ sizeof(void *), GFP_KERNEL);630630+ if (!req->map_page)631631+ goto out;817632 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);818818- if (!req->fmr_list || !req->map_page || !req->indirect_desc)633633+ if (!req->indirect_desc)819634 goto out;820635821636 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,···960759 }961760}962761762762+static int srp_inv_rkey(struct srp_target_port *target, u32 rkey)763763+{764764+ struct ib_send_wr *bad_wr;765765+ struct ib_send_wr wr = {766766+ .opcode = IB_WR_LOCAL_INV,767767+ .wr_id = LOCAL_INV_WR_ID_MASK,768768+ .next = NULL,769769+ .num_sge = 0,770770+ .send_flags = 0,771771+ .ex.invalidate_rkey = rkey,772772+ };773773+774774+ return ib_post_send(target->qp, &wr, &bad_wr);775775+}776776+963777static void srp_unmap_data(struct scsi_cmnd *scmnd,964778 struct srp_target_port *target,965779 struct srp_request *req)966780{967967- struct ib_device *ibdev = target->srp_host->srp_dev->dev;968968- struct ib_pool_fmr **pfmr;781781+ struct srp_device *dev = target->srp_host->srp_dev;782782+ struct ib_device *ibdev = dev->dev;783783+ int i, res;969784970785 if (!scsi_sglist(scmnd) ||971786 (scmnd->sc_data_direction != DMA_TO_DEVICE &&972787 scmnd->sc_data_direction != DMA_FROM_DEVICE))973788 return;974789975975- pfmr = req->fmr_list;976976- while (req->nfmr--)977977- ib_fmr_pool_unmap(*pfmr++);790790+ if (dev->use_fast_reg) {791791+ struct srp_fr_desc **pfr;792792+793793+ for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {794794+ 
res = srp_inv_rkey(target, (*pfr)->mr->rkey);795795+ if (res < 0) {796796+ shost_printk(KERN_ERR, target->scsi_host, PFX797797+ "Queueing INV WR for rkey %#x failed (%d)\n",798798+ (*pfr)->mr->rkey, res);799799+ queue_work(system_long_wq,800800+ &target->tl_err_work);801801+ }802802+ }803803+ if (req->nmdesc)804804+ srp_fr_pool_put(target->fr_pool, req->fr_list,805805+ req->nmdesc);806806+ } else {807807+ struct ib_pool_fmr **pfmr;808808+809809+ for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)810810+ ib_fmr_pool_unmap(*pfmr);811811+ }978812979813 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),980814 scmnd->sc_data_direction);···10498131050814/**1051815 * srp_free_req() - Unmap data and add request to the free request list.816816+ * @target: SRP target port.817817+ * @req: Request to be freed.818818+ * @scmnd: SCSI command associated with @req.819819+ * @req_lim_delta: Amount to be added to @target->req_lim.1052820 */1053821static void srp_free_req(struct srp_target_port *target,1054822 struct srp_request *req, struct scsi_cmnd *scmnd,···1122882 * callbacks will have finished before a new QP is allocated.1123883 */1124884 ret = srp_new_cm_id(target);11251125- /*11261126- * Whether or not creating a new CM ID succeeded, create a new11271127- * QP. This guarantees that all completion callback function11281128- * invocations have finished before request resetting starts.11291129- */11301130- if (ret == 0)11311131- ret = srp_create_target_ib(target);11321132- else11331133- srp_create_target_ib(target);11348851135886 for (i = 0; i < target->req_ring_size; ++i) {1136887 struct srp_request *req = &target->req_ring[i];1137888 srp_finish_req(target, req, NULL, DID_RESET << 16);1138889 }890890+891891+ /*892892+ * Whether or not creating a new CM ID succeeded, create a new893893+ * QP. 
This guarantees that all callback functions for the old QP have894894+ * finished before any send requests are posted on the new QP.895895+ */896896+ ret += srp_create_target_ib(target);11398971140898 INIT_LIST_HEAD(&target->free_tx);1141899 for (i = 0; i < target->queue_size; ++i)···1166928static int srp_map_finish_fmr(struct srp_map_state *state,1167929 struct srp_target_port *target)1168930{11691169- struct srp_device *dev = target->srp_host->srp_dev;1170931 struct ib_pool_fmr *fmr;1171932 u64 io_addr = 0;117293311731173- if (!state->npages)11741174- return 0;11751175-11761176- if (state->npages == 1) {11771177- srp_map_desc(state, state->base_dma_addr, state->fmr_len,11781178- target->rkey);11791179- state->npages = state->fmr_len = 0;11801180- return 0;11811181- }11821182-11831183- fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,934934+ fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages,1184935 state->npages, io_addr);1185936 if (IS_ERR(fmr))1186937 return PTR_ERR(fmr);11879381188939 *state->next_fmr++ = fmr;11891189- state->nfmr++;940940+ state->nmdesc++;119094111911191- srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);11921192- state->npages = state->fmr_len = 0;942942+ srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);943943+1193944 return 0;945945+}946946+947947+static int srp_map_finish_fr(struct srp_map_state *state,948948+ struct srp_target_port *target)949949+{950950+ struct srp_device *dev = target->srp_host->srp_dev;951951+ struct ib_send_wr *bad_wr;952952+ struct ib_send_wr wr;953953+ struct srp_fr_desc *desc;954954+ u32 rkey;955955+956956+ desc = srp_fr_pool_get(target->fr_pool);957957+ if (!desc)958958+ return -ENOMEM;959959+960960+ rkey = ib_inc_rkey(desc->mr->rkey);961961+ ib_update_fast_reg_key(desc->mr, rkey);962962+963963+ memcpy(desc->frpl->page_list, state->pages,964964+ sizeof(state->pages[0]) * state->npages);965965+966966+ memset(&wr, 0, sizeof(wr));967967+ wr.opcode = IB_WR_FAST_REG_MR;968968+ wr.wr_id = 
FAST_REG_WR_ID_MASK;969969+ wr.wr.fast_reg.iova_start = state->base_dma_addr;970970+ wr.wr.fast_reg.page_list = desc->frpl;971971+ wr.wr.fast_reg.page_list_len = state->npages;972972+ wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);973973+ wr.wr.fast_reg.length = state->dma_len;974974+ wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |975975+ IB_ACCESS_REMOTE_READ |976976+ IB_ACCESS_REMOTE_WRITE);977977+ wr.wr.fast_reg.rkey = desc->mr->lkey;978978+979979+ *state->next_fr++ = desc;980980+ state->nmdesc++;981981+982982+ srp_map_desc(state, state->base_dma_addr, state->dma_len,983983+ desc->mr->rkey);984984+985985+ return ib_post_send(target->qp, &wr, &bad_wr);986986+}987987+988988+static int srp_finish_mapping(struct srp_map_state *state,989989+ struct srp_target_port *target)990990+{991991+ int ret = 0;992992+993993+ if (state->npages == 0)994994+ return 0;995995+996996+ if (state->npages == 1 && !register_always)997997+ srp_map_desc(state, state->base_dma_addr, state->dma_len,998998+ target->rkey);999999+ else10001000+ ret = target->srp_host->srp_dev->use_fast_reg ?10011001+ srp_map_finish_fr(state, target) :10021002+ srp_map_finish_fmr(state, target);10031003+10041004+ if (ret == 0) {10051005+ state->npages = 0;10061006+ state->dma_len = 0;10071007+ }10081008+10091009+ return ret;11941010}1195101111961012static void srp_map_update_start(struct srp_map_state *state,···1259967static int srp_map_sg_entry(struct srp_map_state *state,1260968 struct srp_target_port *target,1261969 struct scatterlist *sg, int sg_index,12621262- int use_fmr)970970+ bool use_mr)1263971{1264972 struct srp_device *dev = target->srp_host->srp_dev;1265973 struct ib_device *ibdev = dev->dev;···1271979 if (!dma_len)1272980 return 0;127398112741274- if (use_fmr == SRP_MAP_NO_FMR) {12751275- /* Once we're in direct map mode for a request, we don't12761276- * go back to FMR mode, so no need to update anything982982+ if (!use_mr) {983983+ /*984984+ * Once we're in direct map mode for a 
request, we don't985985+ * go back to FMR or FR mode, so no need to update anything1277986 * other than the descriptor.1278987 */1279988 srp_map_desc(state, dma_addr, dma_len, target->rkey);1280989 return 0;1281990 }128299112831283- /* If we start at an offset into the FMR page, don't merge into12841284- * the current FMR. Finish it out, and use the kernel's MR for this12851285- * sg entry. This is to avoid potential bugs on some SRP targets12861286- * that were never quite defined, but went away when the initiator12871287- * avoided using FMR on such page fragments.992992+ /*993993+ * Since not all RDMA HW drivers support non-zero page offsets for994994+ * FMR, if we start at an offset into a page, don't merge into the995995+ * current FMR mapping. Finish it out, and use the kernel's MR for996996+ * this sg entry.1288997 */12891289- if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {12901290- ret = srp_map_finish_fmr(state, target);998998+ if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||999999+ dma_len > dev->mr_max_size) {10001000+ ret = srp_finish_mapping(state, target);12911001 if (ret)12921002 return ret;12931003···12981004 return 0;12991005 }1300100613011301- /* If this is the first sg to go into the FMR, save our position.13021302- * We need to know the first unmapped entry, its index, and the13031303- * first unmapped address within that entry to be able to restart13041304- * mapping after an error.10071007+ /*10081008+ * If this is the first sg that will be mapped via FMR or via FR, save10091009+ * our position. 
We need to know the first unmapped entry, its index,10101010+ * and the first unmapped address within that entry to be able to10111011+ * restart mapping after an error.13051012 */13061013 if (!state->unmapped_sg)13071014 srp_map_update_start(state, sg, sg_index, dma_addr);1308101513091016 while (dma_len) {13101310- if (state->npages == SRP_FMR_SIZE) {13111311- ret = srp_map_finish_fmr(state, target);10171017+ unsigned offset = dma_addr & ~dev->mr_page_mask;10181018+ if (state->npages == dev->max_pages_per_mr || offset != 0) {10191019+ ret = srp_finish_mapping(state, target);13121020 if (ret)13131021 return ret;1314102213151023 srp_map_update_start(state, sg, sg_index, dma_addr);13161024 }1317102513181318- len = min_t(unsigned int, dma_len, dev->fmr_page_size);10261026+ len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);1319102713201028 if (!state->npages)13211029 state->base_dma_addr = dma_addr;13221322- state->pages[state->npages++] = dma_addr;13231323- state->fmr_len += len;10301030+ state->pages[state->npages++] = dma_addr & dev->mr_page_mask;10311031+ state->dma_len += len;13241032 dma_addr += len;13251033 dma_len -= len;13261034 }1327103513281328- /* If the last entry of the FMR wasn't a full page, then we need to10361036+ /*10371037+ * If the last entry of the MR wasn't a full page, then we need to13291038 * close it out and start a new one -- we can only merge at page13301039 * boundries.13311040 */13321041 ret = 0;13331333- if (len != dev->fmr_page_size) {13341334- ret = srp_map_finish_fmr(state, target);10421042+ if (len != dev->mr_page_size) {10431043+ ret = srp_finish_mapping(state, target);13351044 if (!ret)13361045 srp_map_update_start(state, NULL, 0, 0);13371046 }13381047 return ret;13391048}1340104910501050+static int srp_map_sg(struct srp_map_state *state,10511051+ struct srp_target_port *target, struct srp_request *req,10521052+ struct scatterlist *scat, int count)10531053+{10541054+ struct srp_device *dev = 
target->srp_host->srp_dev;10551055+ struct ib_device *ibdev = dev->dev;10561056+ struct scatterlist *sg;10571057+ int i;10581058+ bool use_mr;10591059+10601060+ state->desc = req->indirect_desc;10611061+ state->pages = req->map_page;10621062+ if (dev->use_fast_reg) {10631063+ state->next_fr = req->fr_list;10641064+ use_mr = !!target->fr_pool;10651065+ } else {10661066+ state->next_fmr = req->fmr_list;10671067+ use_mr = !!target->fmr_pool;10681068+ }10691069+10701070+ for_each_sg(scat, sg, count, i) {10711071+ if (srp_map_sg_entry(state, target, sg, i, use_mr)) {10721072+ /*10731073+ * Memory registration failed, so backtrack to the10741074+ * first unmapped entry and continue on without using10751075+ * memory registration.10761076+ */10771077+ dma_addr_t dma_addr;10781078+ unsigned int dma_len;10791079+10801080+backtrack:10811081+ sg = state->unmapped_sg;10821082+ i = state->unmapped_index;10831083+10841084+ dma_addr = ib_sg_dma_address(ibdev, sg);10851085+ dma_len = ib_sg_dma_len(ibdev, sg);10861086+ dma_len -= (state->unmapped_addr - dma_addr);10871087+ dma_addr = state->unmapped_addr;10881088+ use_mr = false;10891089+ srp_map_desc(state, dma_addr, dma_len, target->rkey);10901090+ }10911091+ }10921092+10931093+ if (use_mr && srp_finish_mapping(state, target))10941094+ goto backtrack;10951095+10961096+ req->nmdesc = state->nmdesc;10971097+10981098+ return 0;10991099+}11001100+13411101static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,13421102 struct srp_request *req)13431103{13441344- struct scatterlist *scat, *sg;11041104+ struct scatterlist *scat;13451105 struct srp_cmd *cmd = req->cmd->buf;13461346- int i, len, nents, count, use_fmr;11061106+ int len, nents, count;13471107 struct srp_device *dev;13481108 struct ib_device *ibdev;13491109 struct srp_map_state state;···14291081 fmt = SRP_DATA_DESC_DIRECT;14301082 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);1431108314321432- if (count == 1) {10841084+ if (count == 1 
&& !register_always) {14331085 /*14341086 * The midlayer only generated a single gather/scatter14351087 * entry, or DMA mapping coalesced everything to a···14421094 buf->key = cpu_to_be32(target->rkey);14431095 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));1444109614451445- req->nfmr = 0;10971097+ req->nmdesc = 0;14461098 goto map_complete;14471099 }1448110014491449- /* We have more than one scatter/gather entry, so build our indirect14501450- * descriptor table, trying to merge as many entries with FMR as we14511451- * can.11011101+ /*11021102+ * We have more than one scatter/gather entry, so build our indirect11031103+ * descriptor table, trying to merge as many entries as we can.14521104 */14531105 indirect_hdr = (void *) cmd->add_data;14541106···14561108 target->indirect_size, DMA_TO_DEVICE);1457110914581110 memset(&state, 0, sizeof(state));14591459- state.desc = req->indirect_desc;14601460- state.pages = req->map_page;14611461- state.next_fmr = req->fmr_list;14621462-14631463- use_fmr = dev->fmr_pool ? 
SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;14641464-14651465- for_each_sg(scat, sg, count, i) {14661466- if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {14671467- /* FMR mapping failed, so backtrack to the first14681468- * unmapped entry and continue on without using FMR.14691469- */14701470- dma_addr_t dma_addr;14711471- unsigned int dma_len;14721472-14731473-backtrack:14741474- sg = state.unmapped_sg;14751475- i = state.unmapped_index;14761476-14771477- dma_addr = ib_sg_dma_address(ibdev, sg);14781478- dma_len = ib_sg_dma_len(ibdev, sg);14791479- dma_len -= (state.unmapped_addr - dma_addr);14801480- dma_addr = state.unmapped_addr;14811481- use_fmr = SRP_MAP_NO_FMR;14821482- srp_map_desc(&state, dma_addr, dma_len, target->rkey);14831483- }14841484- }14851485-14861486- if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))14871487- goto backtrack;11111111+ srp_map_sg(&state, target, req, scat, count);1488111214891113 /* We've mapped the request, now pull as much of the indirect14901114 * descriptor table as we can into the command buffer. 
If this···14641144 * guaranteed to fit into the command, as the SCSI layer won't14651145 * give us more S/G entries than we allow.14661146 */14671467- req->nfmr = state.nfmr;14681147 if (state.ndesc == 1) {14691469- /* FMR mapping was able to collapse this to one entry,11481148+ /*11491149+ * Memory registration collapsed the sg-list into one entry,14701150 * so use a direct descriptor.14711151 */14721152 struct srp_direct_buf *buf = (void *) cmd->add_data;···1775145517761456/**17771457 * srp_tl_err_work() - handle a transport layer error14581458+ * @work: Work structure embedded in an SRP target port.17781459 *17791460 * Note: This function may get invoked before the rport has been created,17801461 * hence the target->rport test.···17891468 srp_start_tl_fail_timers(target->rport);17901469}1791147017921792-static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,17931793- struct srp_target_port *target)14711471+static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,14721472+ bool send_err, struct srp_target_port *target)17941473{17951474 if (target->connected && !target->qp_in_error) {17961796- shost_printk(KERN_ERR, target->scsi_host,17971797- PFX "failed %s status %d\n",17981798- send_err ? "send" : "receive",17991799- wc_status);14751475+ if (wr_id & LOCAL_INV_WR_ID_MASK) {14761476+ shost_printk(KERN_ERR, target->scsi_host, PFX14771477+ "LOCAL_INV failed with status %d\n",14781478+ wc_status);14791479+ } else if (wr_id & FAST_REG_WR_ID_MASK) {14801480+ shost_printk(KERN_ERR, target->scsi_host, PFX14811481+ "FAST_REG_MR failed status %d\n",14821482+ wc_status);14831483+ } else {14841484+ shost_printk(KERN_ERR, target->scsi_host,14851485+ PFX "failed %s status %d for iu %p\n",14861486+ send_err ? 
"send" : "receive",14871487+ wc_status, (void *)(uintptr_t)wr_id);14881488+ }18001489 queue_work(system_long_wq, &target->tl_err_work);18011490 }18021491 target->qp_in_error = true;···18221491 if (likely(wc.status == IB_WC_SUCCESS)) {18231492 srp_handle_recv(target, &wc);18241493 } else {18251825- srp_handle_qp_err(wc.status, false, target);14941494+ srp_handle_qp_err(wc.wr_id, wc.status, false, target);18261495 }18271496 }18281497}···18381507 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;18391508 list_add(&iu->list, &target->free_tx);18401509 } else {18411841- srp_handle_qp_err(wc.status, true, target);15101510+ srp_handle_qp_err(wc.wr_id, wc.status, true, target);18421511 }18431512 }18441513}···18521521 struct srp_cmd *cmd;18531522 struct ib_device *dev;18541523 unsigned long flags;18551855- int len, result;15241524+ int len, ret;18561525 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;1857152618581527 /*···18641533 if (in_scsi_eh)18651534 mutex_lock(&rport->mutex);1866153518671867- result = srp_chkready(target->rport);18681868- if (unlikely(result)) {18691869- scmnd->result = result;18701870- scmnd->scsi_done(scmnd);18711871- goto unlock_rport;18721872- }15361536+ scmnd->result = srp_chkready(target->rport);15371537+ if (unlikely(scmnd->result))15381538+ goto err;1873153918741540 spin_lock_irqsave(&target->lock, flags);18751541 iu = __srp_get_tx_iu(target, SRP_IU_CMD);···18811553 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,18821554 DMA_TO_DEVICE);1883155518841884- scmnd->result = 0;18851556 scmnd->host_scribble = (void *) req;1886155718871558 cmd = iu->buf;···18971570 len = srp_map_data(scmnd, target, req);18981571 if (len < 0) {18991572 shost_printk(KERN_ERR, target->scsi_host,19001900- PFX "Failed to map data\n");15731573+ PFX "Failed to map data (%d)\n", len);15741574+ /*15751575+ * If we ran out of memory descriptors (-ENOMEM) because an15761576+ * application is queuing many requests with more than15771577+ * 
max_pages_per_mr sg-list elements, tell the SCSI mid-layer15781578+ * to reduce queue depth temporarily.15791579+ */15801580+ scmnd->result = len == -ENOMEM ?15811581+ DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;19011582 goto err_iu;19021583 }19031584···19171582 goto err_unmap;19181583 }1919158415851585+ ret = 0;15861586+19201587unlock_rport:19211588 if (in_scsi_eh)19221589 mutex_unlock(&rport->mutex);1923159019241924- return 0;15911591+ return ret;1925159219261593err_unmap:19271594 srp_unmap_data(scmnd, target, req);···19311594err_iu:19321595 srp_put_tx_iu(target, iu, SRP_IU_CMD);1933159615971597+ /*15981598+ * Avoid that the loops that iterate over the request ring can15991599+ * encounter a dangling SCSI command pointer.16001600+ */16011601+ req->scmnd = NULL;16021602+19341603 spin_lock_irqsave(&target->lock, flags);19351604 list_add(&req->list, &target->free_reqs);1936160519371606err_unlock:19381607 spin_unlock_irqrestore(&target->lock, flags);1939160819401940- if (in_scsi_eh)19411941- mutex_unlock(&rport->mutex);16091609+err:16101610+ if (scmnd->result) {16111611+ scmnd->scsi_done(scmnd);16121612+ ret = 0;16131613+ } else {16141614+ ret = SCSI_MLQUEUE_HOST_BUSY;16151615+ }1942161619431943- return SCSI_MLQUEUE_HOST_BUSY;16171617+ goto unlock_rport;19441618}1945161919461620/*···2658231026592311/**26602312 * srp_conn_unique() - check whether the connection to a target is unique23132313+ * @host: SRP host.23142314+ * @target: SRP target port.26612315 */26622316static bool srp_conn_unique(struct srp_host *host,26632317 struct srp_target_port *target)···29552605 container_of(dev, struct srp_host, dev);29562606 struct Scsi_Host *target_host;29572607 struct srp_target_port *target;29582958- struct ib_device *ibdev = host->srp_dev->dev;26082608+ struct srp_device *srp_dev = host->srp_dev;26092609+ struct ib_device *ibdev = srp_dev->dev;29592610 int ret;2960261129612612 target_host = scsi_host_alloc(&srp_template,···30012650 goto err;30022651 }3003265230043004- if 
(!host->srp_dev->fmr_pool && !target->allow_ext_sg &&30053005- target->cmd_sg_cnt < target->sg_tablesize) {30063006- pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");26532653+ if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&26542654+ target->cmd_sg_cnt < target->sg_tablesize) {26552655+ pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");30072656 target->sg_tablesize = target->cmd_sg_cnt;30082657 }30092658···31412790{31422791 struct srp_device *srp_dev;31432792 struct ib_device_attr *dev_attr;31443144- struct ib_fmr_pool_param fmr_param;31452793 struct srp_host *host;31463146- int max_pages_per_fmr, fmr_page_shift, s, e, p;27942794+ int mr_page_shift, s, e, p;27952795+ u64 max_pages_per_mr;3147279631482797 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);31492798 if (!dev_attr)···31582807 if (!srp_dev)31592808 goto free_attr;3160280928102810+ srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&28112811+ device->map_phys_fmr && device->unmap_fmr);28122812+ srp_dev->has_fr = (dev_attr->device_cap_flags &28132813+ IB_DEVICE_MEM_MGT_EXTENSIONS);28142814+ if (!srp_dev->has_fmr && !srp_dev->has_fr)28152815+ dev_warn(&device->dev, "neither FMR nor FR is supported\n");28162816+28172817+ srp_dev->use_fast_reg = (srp_dev->has_fr &&28182818+ (!srp_dev->has_fmr || prefer_fr));28192819+31612820 /*31622821 * Use the smallest page size supported by the HCA, down to a31632822 * minimum of 4096 bytes. 
We're unlikely to build large sglists31642823 * out of smaller entries.31652824 */31663166- fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);31673167- srp_dev->fmr_page_size = 1 << fmr_page_shift;31683168- srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);31693169- srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;28252825+ mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);28262826+ srp_dev->mr_page_size = 1 << mr_page_shift;28272827+ srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);28282828+ max_pages_per_mr = dev_attr->max_mr_size;28292829+ do_div(max_pages_per_mr, srp_dev->mr_page_size);28302830+ srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,28312831+ max_pages_per_mr);28322832+ if (srp_dev->use_fast_reg) {28332833+ srp_dev->max_pages_per_mr =28342834+ min_t(u32, srp_dev->max_pages_per_mr,28352835+ dev_attr->max_fast_reg_page_list_len);28362836+ }28372837+ srp_dev->mr_max_size = srp_dev->mr_page_size *28382838+ srp_dev->max_pages_per_mr;28392839+ pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",28402840+ device->name, mr_page_shift, dev_attr->max_mr_size,28412841+ dev_attr->max_fast_reg_page_list_len,28422842+ srp_dev->max_pages_per_mr, srp_dev->mr_max_size);3170284331712844 INIT_LIST_HEAD(&srp_dev->dev_list);31722845···32052830 IB_ACCESS_REMOTE_WRITE);32062831 if (IS_ERR(srp_dev->mr))32072832 goto err_pd;32083208-32093209- for (max_pages_per_fmr = SRP_FMR_SIZE;32103210- max_pages_per_fmr >= SRP_FMR_MIN_SIZE;32113211- max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {32123212- memset(&fmr_param, 0, sizeof fmr_param);32133213- fmr_param.pool_size = SRP_FMR_POOL_SIZE;32143214- fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;32153215- fmr_param.cache = 1;32163216- fmr_param.max_pages_per_fmr = max_pages_per_fmr;32173217- fmr_param.page_shift = fmr_page_shift;32183218- fmr_param.access = 
(IB_ACCESS_LOCAL_WRITE |32193219- IB_ACCESS_REMOTE_WRITE |32203220- IB_ACCESS_REMOTE_READ);32213221-32223222- srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);32233223- if (!IS_ERR(srp_dev->fmr_pool))32243224- break;32253225- }32263226-32273227- if (IS_ERR(srp_dev->fmr_pool))32283228- srp_dev->fmr_pool = NULL;3229283332302834 if (device->node_type == RDMA_NODE_IB_SWITCH) {32312835 s = 0;···32682914 kfree(host);32692915 }3270291632713271- if (srp_dev->fmr_pool)32723272- ib_destroy_fmr_pool(srp_dev->fmr_pool);32732917 ib_dereg_mr(srp_dev->mr);32742918 ib_dealloc_pd(srp_dev->pd);32752919
+79-15
drivers/infiniband/ulp/srp/ib_srp.h
···6666 SRP_TAG_NO_REQ = ~0U,6767 SRP_TAG_TSK_MGMT = 1U << 31,68686969- SRP_FMR_SIZE = 512,7070- SRP_FMR_MIN_SIZE = 128,7171- SRP_FMR_POOL_SIZE = 1024,7272- SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4,6969+ SRP_MAX_PAGES_PER_MR = 512,73707474- SRP_MAP_ALLOW_FMR = 0,7575- SRP_MAP_NO_FMR = 1,7171+ LOCAL_INV_WR_ID_MASK = 1,7272+ FAST_REG_WR_ID_MASK = 2,7673};77747875enum srp_target_state {···8386 SRP_IU_RSP,8487};85888989+/*9090+ * @mr_page_mask: HCA memory registration page mask.9191+ * @mr_page_size: HCA memory registration page size.9292+ * @mr_max_size: Maximum size in bytes of a single FMR / FR registration9393+ * request.9494+ */8695struct srp_device {8796 struct list_head dev_list;8897 struct ib_device *dev;8998 struct ib_pd *pd;9099 struct ib_mr *mr;9191- struct ib_fmr_pool *fmr_pool;9292- u64 fmr_page_mask;9393- int fmr_page_size;9494- int fmr_max_size;100100+ u64 mr_page_mask;101101+ int mr_page_size;102102+ int mr_max_size;103103+ int max_pages_per_mr;104104+ bool has_fmr;105105+ bool has_fr;106106+ bool use_fast_reg;95107};9610897109struct srp_host {···118112 struct list_head list;119113 struct scsi_cmnd *scmnd;120114 struct srp_iu *cmd;121121- struct ib_pool_fmr **fmr_list;115115+ union {116116+ struct ib_pool_fmr **fmr_list;117117+ struct srp_fr_desc **fr_list;118118+ };122119 u64 *map_page;123120 struct srp_direct_buf *indirect_desc;124121 dma_addr_t indirect_dma_addr;125125- short nfmr;122122+ short nmdesc;126123 short index;127124};128125···140131 struct ib_cq *send_cq ____cacheline_aligned_in_smp;141132 struct ib_cq *recv_cq;142133 struct ib_qp *qp;134134+ union {135135+ struct ib_fmr_pool *fmr_pool;136136+ struct srp_fr_pool *fr_pool;137137+ };143138 u32 lkey;144139 u32 rkey;145140 enum srp_target_state state;···210197 enum dma_data_direction direction;211198};212199200200+/**201201+ * struct srp_fr_desc - fast registration work request arguments202202+ * @entry: Entry in srp_fr_pool.free_list.203203+ * @mr: Memory region.204204+ * @frpl: Fast 
registration page list.205205+ */206206+struct srp_fr_desc {207207+ struct list_head entry;208208+ struct ib_mr *mr;209209+ struct ib_fast_reg_page_list *frpl;210210+};211211+212212+/**213213+ * struct srp_fr_pool - pool of fast registration descriptors214214+ *215215+ * An entry is available for allocation if and only if it occurs in @free_list.216216+ *217217+ * @size: Number of descriptors in this pool.218218+ * @max_page_list_len: Maximum fast registration work request page list length.219219+ * @lock: Protects free_list.220220+ * @free_list: List of free descriptors.221221+ * @desc: Fast registration descriptor pool.222222+ */223223+struct srp_fr_pool {224224+ int size;225225+ int max_page_list_len;226226+ spinlock_t lock;227227+ struct list_head free_list;228228+ struct srp_fr_desc desc[0];229229+};230230+231231+/**232232+ * struct srp_map_state - per-request DMA memory mapping state233233+ * @desc: Pointer to the element of the SRP buffer descriptor array234234+ * that is being filled in.235235+ * @pages: Array with DMA addresses of pages being considered for236236+ * memory registration.237237+ * @base_dma_addr: DMA address of the first page that has not yet been mapped.238238+ * @dma_len: Number of bytes that will be registered with the next239239+ * FMR or FR memory registration call.240240+ * @total_len: Total number of bytes in the sg-list being mapped.241241+ * @npages: Number of page addresses in the pages[] array.242242+ * @nmdesc: Number of FMR or FR memory descriptors used for mapping.243243+ * @ndesc: Number of SRP buffer descriptors that have been filled in.244244+ * @unmapped_sg: First element of the sg-list that is mapped via FMR or FR.245245+ * @unmapped_index: Index of the first element mapped via FMR or FR.246246+ * @unmapped_addr: DMA address of the first element mapped via FMR or FR.247247+ */213248struct srp_map_state {214214- struct ib_pool_fmr **next_fmr;249249+ union {250250+ struct ib_pool_fmr **next_fmr;251251+ struct srp_fr_desc 
**next_fr;252252+ };215253 struct srp_direct_buf *desc;216254 u64 *pages;217255 dma_addr_t base_dma_addr;218218- u32 fmr_len;256256+ u32 dma_len;219257 u32 total_len;220258 unsigned int npages;221221- unsigned int nfmr;259259+ unsigned int nmdesc;222260 unsigned int ndesc;223261 struct scatterlist *unmapped_sg;224262 int unmapped_index;
···178178 struct mlx4_cmd_info *cmd)179179{180180 struct mlx4_priv *priv = mlx4_priv(dev);181181- u8 field;182182- u32 size;181181+ u8 field, port;182182+ u32 size, proxy_qp, qkey;183183 int err = 0;184184185185#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0···209209210210/* when opcode modifier = 1 */211211#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3212212+#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4212213#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8213214#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc214215···222221#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40223222#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80224223#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10224224+#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08225225226226#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80227227···236234 return -EINVAL;237235238236 vhcr->in_modifier = converted_port;239239- /* Set nic_info bit to mark new fields support */240240- field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;241241- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);242242-243237 /* phys-port = logical-port */244238 field = vhcr->in_modifier -245239 find_first_bit(actv_ports.ports, dev->caps.num_ports);246240 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);247241248248- field = vhcr->in_modifier;242242+ port = vhcr->in_modifier;243243+ proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;244244+245245+ /* Set nic_info bit to mark new fields support */246246+ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;247247+248248+ if (mlx4_vf_smi_enabled(dev, slave, port) &&249249+ !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {250250+ field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;251251+ MLX4_PUT(outbox->buf, qkey,252252+ QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);253253+ }254254+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);255255+249256 /* size is now the QP number */250250- size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;257257+ size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;251258 
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);252259253260 size += 2;254261 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);255262256256- size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;257257- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);258258-259259- size += 2;260260- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);263263+ MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);264264+ proxy_qp += 2;265265+ MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);261266262267 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],263268 QUERY_FUNC_CAP_PHYS_PORT_ID);···335326 struct mlx4_cmd_mailbox *mailbox;336327 u32 *outbox;337328 u8 field, op_modifier;338338- u32 size;329329+ u32 size, qkey;339330 int err = 0, quotas = 0;340331341332 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */···423414424415 MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);425416 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {426426- if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) {417417+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {427418 mlx4_err(dev, "VLAN is enforced on this port\n");428419 err = -EPROTONOSUPPORT;429420 goto out;···449440 if (func_cap->physical_port != gen_or_port) {450441 err = -ENOSYS;451442 goto out;443443+ }444444+445445+ if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {446446+ MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);447447+ func_cap->qp0_qkey = qkey;448448+ } else {449449+ func_cap->qp0_qkey = 0;452450 }453451454452 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
+1
drivers/net/ethernet/mellanox/mlx4/fw.h
···134134 int max_eq;135135 int reserved_eq;136136 int mcg_quota;137137+ u32 qp0_qkey;137138 u32 qp0_tunnel_qpn;138139 u32 qp0_proxy_qpn;139140 u32 qp1_tunnel_qpn;
+4-3
drivers/net/ethernet/mellanox/mlx4/icm.c
···245245 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);246246}247247248248-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)248248+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,249249+ gfp_t gfp)249250{250251 u32 i = (obj & (table->num_obj - 1)) /251252 (MLX4_TABLE_CHUNK_SIZE / table->obj_size);···260259 }261260262261 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,263263- (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |262262+ (table->lowmem ? gfp : GFP_HIGHUSER) |264263 __GFP_NOWARN, table->coherent);265264 if (!table->icm[i]) {266265 ret = -ENOMEM;···357356 u32 i;358357359358 for (i = start; i <= end; i += inc) {360360- err = mlx4_table_get(dev, table, i);359359+ err = mlx4_table_get(dev, table, i, GFP_KERNEL);361360 if (err)362361 goto fail;363362 }
···8080 RDMA_TRANSPORT_USNIC_UDP8181};82828383-enum rdma_transport_type8484-rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;8383+__attribute_const__ enum rdma_transport_type8484+rdma_node_get_transport(enum rdma_node_type node_type);85858686enum rdma_link_layer {8787 IB_LINK_LAYER_UNSPECIFIED,···466466 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.467467 * @rate: rate to convert.468468 */469469-int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;469469+__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);470470471471/**472472 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.473473 * For example, IB_RATE_2_5_GBPS will be converted to 2500.474474 * @rate: rate to convert.475475 */476476-int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;476476+__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);477477478478enum ib_mr_create_flags {479479 IB_MR_SIGNATURE_EN = 1,···604604 * enum.605605 * @mult: multiple to convert.606606 */607607-enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;607607+__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);608608609609struct ib_ah_attr {610610 struct ib_global_route grh;···783783 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,784784 IB_QP_CREATE_NETIF_QP = 1 << 5,785785 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,786786+ IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,786787 /* reserve bits 26-31 for low level drivers' internal use */787788 IB_QP_CREATE_RESERVED_START = 1 << 26,788789 IB_QP_CREATE_RESERVED_END = 1 << 31,
+199
include/rdma/iw_portmap.h
···11+/*22+ * Copyright (c) 2014 Intel Corporation. All rights reserved.33+ * Copyright (c) 2014 Chelsio, Inc. All rights reserved.44+ *55+ * This software is available to you under a choice of one of two66+ * licenses. You may choose to be licensed under the terms of the GNU77+ * General Public License (GPL) Version 2, available from the file88+ * COPYING in the main directory of this source tree, or the99+ * OpenIB.org BSD license below:1010+ *1111+ * Redistribution and use in source and binary forms, with or1212+ * without modification, are permitted provided that the following1313+ * conditions are met:1414+ *1515+ * - Redistributions of source code must retain the above1616+ * copyright notice, this list of conditions and the following1717+ * disclaimer.1818+ *1919+ * - Redistributions in binary form must reproduce the above2020+ * copyright notice, this list of conditions and the following2121+ * disclaimer in the documentation and/or other materials2222+ * provided with the distribution.2323+ *2424+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,2525+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF2626+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND2727+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS2828+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN2929+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN3030+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE3131+ * SOFTWARE.3232+ */3333+#ifndef _IW_PORTMAP_H3434+#define _IW_PORTMAP_H3535+3636+#define IWPM_ULIBNAME_SIZE 323737+#define IWPM_DEVNAME_SIZE 323838+#define IWPM_IFNAME_SIZE 163939+#define IWPM_IPADDR_SIZE 164040+4141+enum {4242+ IWPM_INVALID_NLMSG_ERR = 10,4343+ IWPM_CREATE_MAPPING_ERR,4444+ IWPM_DUPLICATE_MAPPING_ERR,4545+ IWPM_UNKNOWN_MAPPING_ERR,4646+ IWPM_CLIENT_DEV_INFO_ERR,4747+ IWPM_USER_LIB_INFO_ERR,4848+ IWPM_REMOTE_QUERY_REJECT4949+};5050+5151+struct iwpm_dev_data {5252+ char dev_name[IWPM_DEVNAME_SIZE];5353+ char if_name[IWPM_IFNAME_SIZE];5454+};5555+5656+struct iwpm_sa_data {5757+ struct sockaddr_storage loc_addr;5858+ struct sockaddr_storage mapped_loc_addr;5959+ struct sockaddr_storage rem_addr;6060+ struct sockaddr_storage mapped_rem_addr;6161+};6262+6363+/**6464+ * iwpm_init - Allocate resources for the iwarp port mapper6565+ *6666+ * Should be called when network interface goes up.6767+ */6868+int iwpm_init(u8);6969+7070+/**7171+ * iwpm_exit - Deallocate resources for the iwarp port mapper7272+ *7373+ * Should be called when network interface goes down.7474+ */7575+int iwpm_exit(u8);7676+7777+/**7878+ * iwpm_valid_pid - Check if the userspace iwarp port mapper pid is valid7979+ *8080+ * Returns true if the pid is greater than zero, otherwise returns false8181+ */8282+int iwpm_valid_pid(void);8383+8484+/**8585+ * iwpm_register_pid - Send a netlink query to userspace8686+ * to get the iwarp port mapper pid8787+ * @pm_msg: Contains driver info to send to the userspace port mapper8888+ * @nl_client: The index of the netlink client8989+ */9090+int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client);9191+9292+/**9393+ * iwpm_add_mapping - Send a netlink add mapping request 
to9494+ * the userspace port mapper9595+ * @pm_msg: Contains the local ip/tcp address info to send9696+ * @nl_client: The index of the netlink client9797+ *9898+ * If the request is successful, the pm_msg stores9999+ * the port mapper response (mapped address info)100100+ */101101+int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client);102102+103103+/**104104+ * iwpm_add_and_query_mapping - Send a netlink add and query mapping request105105+ * to the userspace port mapper106106+ * @pm_msg: Contains the local and remote ip/tcp address info to send107107+ * @nl_client: The index of the netlink client108108+ *109109+ * If the request is successful, the pm_msg stores the110110+ * port mapper response (mapped local and remote address info)111111+ */112112+int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client);113113+114114+/**115115+ * iwpm_remove_mapping - Send a netlink remove mapping request116116+ * to the userspace port mapper117117+ *118118+ * @local_addr: Local ip/tcp address to remove119119+ * @nl_client: The index of the netlink client120120+ */121121+int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client);122122+123123+/**124124+ * iwpm_register_pid_cb - Process the port mapper response to125125+ * iwpm_register_pid query126126+ * @skb:127127+ * @cb: Contains the received message (payload and netlink header)128128+ *129129+ * If successful, the function receives the userspace port mapper pid130130+ * which is used in future communication with the port mapper131131+ */132132+int iwpm_register_pid_cb(struct sk_buff *, struct netlink_callback *);133133+134134+/**135135+ * iwpm_add_mapping_cb - Process the port mapper response to136136+ * iwpm_add_mapping request137137+ * @skb:138138+ * @cb: Contains the received message (payload and netlink header)139139+ */140140+int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);141141+142142+/**143143+ * iwpm_add_and_query_mapping_cb - Process the port mapper 
response to144144+ * iwpm_add_and_query_mapping request145145+ * @skb:146146+ * @cb: Contains the received message (payload and netlink header)147147+ */148148+int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);149149+150150+/**151151+ * iwpm_mapping_error_cb - Process port mapper notification for error152152+ *153153+ * @skb:154154+ * @cb: Contains the received message (payload and netlink header)155155+ */156156+int iwpm_mapping_error_cb(struct sk_buff *, struct netlink_callback *);157157+158158+/**159159+ * iwpm_mapping_info_cb - Process a notification that the userspace160160+ * port mapper daemon is started161161+ * @skb:162162+ * @cb: Contains the received message (payload and netlink header)163163+ *164164+ * Using the received port mapper pid, send all the local mapping165165+ * info records to the userspace port mapper166166+ */167167+int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);168168+169169+/**170170+ * iwpm_ack_mapping_info_cb - Process the port mapper ack for171171+ * the provided local mapping info records172172+ * @skb:173173+ * @cb: Contains the received message (payload and netlink header)174174+ */175175+int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);176176+177177+/**178178+ * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address179179+ * info in a hash table180180+ * @local_addr: Local ip/tcp address181181+ * @mapped_addr: Mapped local ip/tcp address182182+ * @nl_client: The index of the netlink client183183+ */184184+int iwpm_create_mapinfo(struct sockaddr_storage *local_addr,185185+ struct sockaddr_storage *mapped_addr, u8 nl_client);186186+187187+/**188188+ * iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address189189+ * info from the hash table190190+ * @local_addr: Local ip/tcp address191191+ * @mapped_addr: Mapped local ip/tcp address192192+ *193193+ * Returns err code if mapping info is not found in the hash table,194194+ * otherwise returns 
0195195+ */196196+int iwpm_remove_mapinfo(struct sockaddr_storage *local_addr,197197+ struct sockaddr_storage *mapped_addr);198198+199199+#endif /* _IW_PORTMAP_H */
+22-1
include/rdma/rdma_netlink.h
···4343 * Returns the allocated buffer on success and NULL on failure.4444 */4545void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,4646- int len, int client, int op);4646+ int len, int client, int op, int flags);4747/**4848 * Put a new attribute in a supplied skb.4949 * @skb: The netlink skb.···5555 */5656int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,5757 int len, void *data, int type);5858+5959+/**6060+ * Send the supplied skb to a specific userspace PID.6161+ * @skb: The netlink skb6262+ * @nlh: Header of the netlink message to send6363+ * @pid: Userspace netlink process ID6464+ * Returns 0 on success or a negative error code.6565+ */6666+int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,6767+ __u32 pid);6868+6969+/**7070+ * Send the supplied skb to a netlink group.7171+ * @skb: The netlink skb7272+ * @nlh: Header of the netlink message to send7373+ * @group: Netlink group ID7474+ * @flags: allocation flags7575+ * Returns 0 on success or a negative error code.7676+ */7777+int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,7878+ unsigned int group, gfp_t flags);58795980#endif /* _RDMA_NETLINK_H */