Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branches 'cxgb4-4.8', 'mlx5-4.8' and 'fw-version' into k.o/for-4.8

+3149 -491
+9
drivers/infiniband/core/device.c
··· 311 311 return 0; 312 312 } 313 313 314 + void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len) 315 + { 316 + if (dev->get_dev_fw_str) 317 + dev->get_dev_fw_str(dev, str, str_len); 318 + else 319 + str[0] = '\0'; 320 + } 321 + EXPORT_SYMBOL(ib_get_device_fw_str); 322 + 314 323 /** 315 324 * ib_register_device - Register an IB device with IB core 316 325 * @device:Device to register
+14 -1
drivers/infiniband/core/sysfs.c
··· 38 38 #include <linux/stat.h> 39 39 #include <linux/string.h> 40 40 #include <linux/netdevice.h> 41 + #include <linux/ethtool.h> 41 42 42 43 #include <rdma/ib_mad.h> 43 44 #include <rdma/ib_pma.h> ··· 1197 1196 return count; 1198 1197 } 1199 1198 1199 + static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 1200 + char *buf) 1201 + { 1202 + struct ib_device *dev = container_of(device, struct ib_device, dev); 1203 + 1204 + ib_get_device_fw_str(dev, buf, PAGE_SIZE); 1205 + strlcat(buf, "\n", PAGE_SIZE); 1206 + return strlen(buf); 1207 + } 1208 + 1200 1209 static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); 1201 1210 static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL); 1202 1211 static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL); 1203 1212 static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc); 1213 + static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 1204 1214 1205 1215 static struct device_attribute *ib_class_attributes[] = { 1206 1216 &dev_attr_node_type, 1207 1217 &dev_attr_sys_image_guid, 1208 1218 &dev_attr_node_guid, 1209 - &dev_attr_node_desc 1219 + &dev_attr_node_desc, 1220 + &dev_attr_fw_ver, 1210 1221 }; 1211 1222 1212 1223 static void free_port_list_attributes(struct ib_device *device)
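Taken together, the two core hunks above move firmware-version reporting into the IB core: a provider fills in the new get_dev_fw_str() callback and the core exposes it through a common fw_ver sysfs attribute (falling back to an empty string when the callback is absent). Below is a minimal, illustrative sketch of the provider side, assuming a hypothetical driver ("exdev") that caches its firmware revision at probe time; the cxgb3 hunk later in this merge follows the same pattern, pulling the string from ethtool's get_drvinfo instead.

#include <rdma/ib_verbs.h>

/* Hypothetical driver-private structure, for illustration only. */
struct exdev {
	struct ib_device ibdev;
	u32 fw_major, fw_minor, fw_build;
};

/* Provider hook matching the new ib_device->get_dev_fw_str() signature. */
static void exdev_get_dev_fw_str(struct ib_device *ibdev, char *str,
				 size_t str_len)
{
	struct exdev *dev = container_of(ibdev, struct exdev, ibdev);

	snprintf(str, str_len, "%u.%u.%u",
		 dev->fw_major, dev->fw_minor, dev->fw_build);
}

/* Wired up once at registration time, before ib_register_device():
 *	dev->ibdev.get_dev_fw_str = exdev_get_dev_fw_str;
 */
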
+13
drivers/infiniband/core/uverbs.h
··· 162 162 struct ib_uxrcd_object *uxrcd; 163 163 }; 164 164 165 + struct ib_uwq_object { 166 + struct ib_uevent_object uevent; 167 + }; 168 + 165 169 struct ib_ucq_object { 166 170 struct ib_uobject uobject; 167 171 struct ib_uverbs_file *uverbs_file; ··· 185 181 extern struct idr ib_uverbs_srq_idr; 186 182 extern struct idr ib_uverbs_xrcd_idr; 187 183 extern struct idr ib_uverbs_rule_idr; 184 + extern struct idr ib_uverbs_wq_idr; 185 + extern struct idr ib_uverbs_rwq_ind_tbl_idr; 188 186 189 187 void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); 190 188 ··· 205 199 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); 206 200 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); 207 201 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); 202 + void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr); 208 203 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); 209 204 void ib_uverbs_event_handler(struct ib_event_handler *handler, 210 205 struct ib_event *event); ··· 226 219 struct ib_uverbs_flow_spec_eth eth; 227 220 struct ib_uverbs_flow_spec_ipv4 ipv4; 228 221 struct ib_uverbs_flow_spec_tcp_udp tcp_udp; 222 + struct ib_uverbs_flow_spec_ipv6 ipv6; 229 223 }; 230 224 }; 231 225 ··· 283 275 IB_UVERBS_DECLARE_EX_CMD(query_device); 284 276 IB_UVERBS_DECLARE_EX_CMD(create_cq); 285 277 IB_UVERBS_DECLARE_EX_CMD(create_qp); 278 + IB_UVERBS_DECLARE_EX_CMD(create_wq); 279 + IB_UVERBS_DECLARE_EX_CMD(modify_wq); 280 + IB_UVERBS_DECLARE_EX_CMD(destroy_wq); 281 + IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table); 282 + IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table); 286 283 287 284 #endif /* UVERBS_H */
+525 -12
drivers/infiniband/core/uverbs_cmd.c
··· 57 57 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; 58 58 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; 59 59 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; 60 + static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" }; 61 + static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" }; 60 62 61 63 /* 62 64 * The ib_uobject locking scheme is as follows: ··· 245 243 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); 246 244 } 247 245 246 + static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context) 247 + { 248 + return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0); 249 + } 250 + 251 + static void put_wq_read(struct ib_wq *wq) 252 + { 253 + put_uobj_read(wq->uobject); 254 + } 255 + 256 + static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle, 257 + struct ib_ucontext *context) 258 + { 259 + return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0); 260 + } 261 + 262 + static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table) 263 + { 264 + put_uobj_read(ind_table->uobject); 265 + } 266 + 248 267 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context) 249 268 { 250 269 struct ib_uobject *uobj; ··· 349 326 INIT_LIST_HEAD(&ucontext->qp_list); 350 327 INIT_LIST_HEAD(&ucontext->srq_list); 351 328 INIT_LIST_HEAD(&ucontext->ah_list); 329 + INIT_LIST_HEAD(&ucontext->wq_list); 330 + INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list); 352 331 INIT_LIST_HEAD(&ucontext->xrcd_list); 353 332 INIT_LIST_HEAD(&ucontext->rule_list); 354 333 rcu_read_lock(); ··· 1772 1747 struct ib_srq *srq = NULL; 1773 1748 struct ib_qp *qp; 1774 1749 char *buf; 1775 - struct ib_qp_init_attr attr; 1750 + struct ib_qp_init_attr attr = {}; 1776 1751 struct ib_uverbs_ex_create_qp_resp resp; 1777 1752 int ret; 1753 + struct ib_rwq_ind_table *ind_tbl = NULL; 1754 + bool has_sq = true; 1778 1755 1779 1756 if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) 1780 1757 return -EPERM; ··· 1788 1761 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, 1789 1762 &qp_lock_class); 1790 1763 down_write(&obj->uevent.uobject.mutex); 1764 + if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) + 1765 + sizeof(cmd->rwq_ind_tbl_handle) && 1766 + (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) { 1767 + ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle, 1768 + file->ucontext); 1769 + if (!ind_tbl) { 1770 + ret = -EINVAL; 1771 + goto err_put; 1772 + } 1773 + 1774 + attr.rwq_ind_tbl = ind_tbl; 1775 + } 1776 + 1777 + if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) + 1778 + sizeof(cmd->reserved1)) && cmd->reserved1) { 1779 + ret = -EOPNOTSUPP; 1780 + goto err_put; 1781 + } 1782 + 1783 + if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) { 1784 + ret = -EINVAL; 1785 + goto err_put; 1786 + } 1787 + 1788 + if (ind_tbl && !cmd->max_send_wr) 1789 + has_sq = false; 1791 1790 1792 1791 if (cmd->qp_type == IB_QPT_XRC_TGT) { 1793 1792 xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext, ··· 1837 1784 } 1838 1785 } 1839 1786 1840 - if (cmd->recv_cq_handle != cmd->send_cq_handle) { 1841 - rcq = idr_read_cq(cmd->recv_cq_handle, 1842 - file->ucontext, 0); 1843 - if (!rcq) { 1844 - ret = -EINVAL; 1845 - goto err_put; 1787 + if (!ind_tbl) { 1788 + if (cmd->recv_cq_handle != cmd->send_cq_handle) { 1789 + rcq = idr_read_cq(cmd->recv_cq_handle, 1790 + 
file->ucontext, 0); 1791 + if (!rcq) { 1792 + ret = -EINVAL; 1793 + goto err_put; 1794 + } 1846 1795 } 1847 1796 } 1848 1797 } 1849 1798 1850 - scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq); 1851 - rcq = rcq ?: scq; 1799 + if (has_sq) 1800 + scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq); 1801 + if (!ind_tbl) 1802 + rcq = rcq ?: scq; 1852 1803 pd = idr_read_pd(cmd->pd_handle, file->ucontext); 1853 - if (!pd || !scq) { 1804 + if (!pd || (!scq && has_sq)) { 1854 1805 ret = -EINVAL; 1855 1806 goto err_put; 1856 1807 } ··· 1921 1864 qp->send_cq = attr.send_cq; 1922 1865 qp->recv_cq = attr.recv_cq; 1923 1866 qp->srq = attr.srq; 1867 + qp->rwq_ind_tbl = ind_tbl; 1924 1868 qp->event_handler = attr.event_handler; 1925 1869 qp->qp_context = attr.qp_context; 1926 1870 qp->qp_type = attr.qp_type; 1927 1871 atomic_set(&qp->usecnt, 0); 1928 1872 atomic_inc(&pd->usecnt); 1929 - atomic_inc(&attr.send_cq->usecnt); 1873 + if (attr.send_cq) 1874 + atomic_inc(&attr.send_cq->usecnt); 1930 1875 if (attr.recv_cq) 1931 1876 atomic_inc(&attr.recv_cq->usecnt); 1932 1877 if (attr.srq) 1933 1878 atomic_inc(&attr.srq->usecnt); 1879 + if (ind_tbl) 1880 + atomic_inc(&ind_tbl->usecnt); 1934 1881 } 1935 1882 qp->uobject = &obj->uevent.uobject; 1936 1883 ··· 1974 1913 put_cq_read(rcq); 1975 1914 if (srq) 1976 1915 put_srq_read(srq); 1916 + if (ind_tbl) 1917 + put_rwq_indirection_table_read(ind_tbl); 1977 1918 1978 1919 mutex_lock(&file->mutex); 1979 1920 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); ··· 2003 1940 put_cq_read(rcq); 2004 1941 if (srq) 2005 1942 put_srq_read(srq); 1943 + if (ind_tbl) 1944 + put_rwq_indirection_table_read(ind_tbl); 2006 1945 2007 1946 put_uobj_write(&obj->uevent.uobject); 2008 1947 return ret; ··· 2098 2033 if (err) 2099 2034 return err; 2100 2035 2101 - if (cmd.comp_mask) 2036 + if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK) 2102 2037 return -EINVAL; 2103 2038 2104 2039 if (cmd.reserved) ··· 3105 3040 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask, 3106 3041 sizeof(struct ib_flow_ipv4_filter)); 3107 3042 break; 3043 + case IB_FLOW_SPEC_IPV6: 3044 + ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6); 3045 + if (ib_spec->ipv6.size != kern_spec->ipv6.size) 3046 + return -EINVAL; 3047 + memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val, 3048 + sizeof(struct ib_flow_ipv6_filter)); 3049 + memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask, 3050 + sizeof(struct ib_flow_ipv6_filter)); 3051 + break; 3108 3052 case IB_FLOW_SPEC_TCP: 3109 3053 case IB_FLOW_SPEC_UDP: 3110 3054 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp); ··· 3128 3054 return -EINVAL; 3129 3055 } 3130 3056 return 0; 3057 + } 3058 + 3059 + int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, 3060 + struct ib_device *ib_dev, 3061 + struct ib_udata *ucore, 3062 + struct ib_udata *uhw) 3063 + { 3064 + struct ib_uverbs_ex_create_wq cmd = {}; 3065 + struct ib_uverbs_ex_create_wq_resp resp = {}; 3066 + struct ib_uwq_object *obj; 3067 + int err = 0; 3068 + struct ib_cq *cq; 3069 + struct ib_pd *pd; 3070 + struct ib_wq *wq; 3071 + struct ib_wq_init_attr wq_init_attr = {}; 3072 + size_t required_cmd_sz; 3073 + size_t required_resp_len; 3074 + 3075 + required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); 3076 + required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); 3077 + 3078 + if (ucore->inlen < required_cmd_sz) 3079 + return -EINVAL; 3080 + 3081 + if (ucore->outlen < required_resp_len) 3082 + return -ENOSPC; 3083 + 3084 + if 
(ucore->inlen > sizeof(cmd) && 3085 + !ib_is_udata_cleared(ucore, sizeof(cmd), 3086 + ucore->inlen - sizeof(cmd))) 3087 + return -EOPNOTSUPP; 3088 + 3089 + err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3090 + if (err) 3091 + return err; 3092 + 3093 + if (cmd.comp_mask) 3094 + return -EOPNOTSUPP; 3095 + 3096 + obj = kmalloc(sizeof(*obj), GFP_KERNEL); 3097 + if (!obj) 3098 + return -ENOMEM; 3099 + 3100 + init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, 3101 + &wq_lock_class); 3102 + down_write(&obj->uevent.uobject.mutex); 3103 + pd = idr_read_pd(cmd.pd_handle, file->ucontext); 3104 + if (!pd) { 3105 + err = -EINVAL; 3106 + goto err_uobj; 3107 + } 3108 + 3109 + cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 3110 + if (!cq) { 3111 + err = -EINVAL; 3112 + goto err_put_pd; 3113 + } 3114 + 3115 + wq_init_attr.cq = cq; 3116 + wq_init_attr.max_sge = cmd.max_sge; 3117 + wq_init_attr.max_wr = cmd.max_wr; 3118 + wq_init_attr.wq_context = file; 3119 + wq_init_attr.wq_type = cmd.wq_type; 3120 + wq_init_attr.event_handler = ib_uverbs_wq_event_handler; 3121 + obj->uevent.events_reported = 0; 3122 + INIT_LIST_HEAD(&obj->uevent.event_list); 3123 + wq = pd->device->create_wq(pd, &wq_init_attr, uhw); 3124 + if (IS_ERR(wq)) { 3125 + err = PTR_ERR(wq); 3126 + goto err_put_cq; 3127 + } 3128 + 3129 + wq->uobject = &obj->uevent.uobject; 3130 + obj->uevent.uobject.object = wq; 3131 + wq->wq_type = wq_init_attr.wq_type; 3132 + wq->cq = cq; 3133 + wq->pd = pd; 3134 + wq->device = pd->device; 3135 + wq->wq_context = wq_init_attr.wq_context; 3136 + atomic_set(&wq->usecnt, 0); 3137 + atomic_inc(&pd->usecnt); 3138 + atomic_inc(&cq->usecnt); 3139 + wq->uobject = &obj->uevent.uobject; 3140 + obj->uevent.uobject.object = wq; 3141 + err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3142 + if (err) 3143 + goto destroy_wq; 3144 + 3145 + memset(&resp, 0, sizeof(resp)); 3146 + resp.wq_handle = obj->uevent.uobject.id; 3147 + resp.max_sge = wq_init_attr.max_sge; 3148 + resp.max_wr = wq_init_attr.max_wr; 3149 + resp.wqn = wq->wq_num; 3150 + resp.response_length = required_resp_len; 3151 + err = ib_copy_to_udata(ucore, 3152 + &resp, resp.response_length); 3153 + if (err) 3154 + goto err_copy; 3155 + 3156 + put_pd_read(pd); 3157 + put_cq_read(cq); 3158 + 3159 + mutex_lock(&file->mutex); 3160 + list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list); 3161 + mutex_unlock(&file->mutex); 3162 + 3163 + obj->uevent.uobject.live = 1; 3164 + up_write(&obj->uevent.uobject.mutex); 3165 + return 0; 3166 + 3167 + err_copy: 3168 + idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3169 + destroy_wq: 3170 + ib_destroy_wq(wq); 3171 + err_put_cq: 3172 + put_cq_read(cq); 3173 + err_put_pd: 3174 + put_pd_read(pd); 3175 + err_uobj: 3176 + put_uobj_write(&obj->uevent.uobject); 3177 + 3178 + return err; 3179 + } 3180 + 3181 + int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, 3182 + struct ib_device *ib_dev, 3183 + struct ib_udata *ucore, 3184 + struct ib_udata *uhw) 3185 + { 3186 + struct ib_uverbs_ex_destroy_wq cmd = {}; 3187 + struct ib_uverbs_ex_destroy_wq_resp resp = {}; 3188 + struct ib_wq *wq; 3189 + struct ib_uobject *uobj; 3190 + struct ib_uwq_object *obj; 3191 + size_t required_cmd_sz; 3192 + size_t required_resp_len; 3193 + int ret; 3194 + 3195 + required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle); 3196 + required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 3197 + 3198 + if (ucore->inlen < required_cmd_sz) 3199 + 
return -EINVAL; 3200 + 3201 + if (ucore->outlen < required_resp_len) 3202 + return -ENOSPC; 3203 + 3204 + if (ucore->inlen > sizeof(cmd) && 3205 + !ib_is_udata_cleared(ucore, sizeof(cmd), 3206 + ucore->inlen - sizeof(cmd))) 3207 + return -EOPNOTSUPP; 3208 + 3209 + ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3210 + if (ret) 3211 + return ret; 3212 + 3213 + if (cmd.comp_mask) 3214 + return -EOPNOTSUPP; 3215 + 3216 + resp.response_length = required_resp_len; 3217 + uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle, 3218 + file->ucontext); 3219 + if (!uobj) 3220 + return -EINVAL; 3221 + 3222 + wq = uobj->object; 3223 + obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); 3224 + ret = ib_destroy_wq(wq); 3225 + if (!ret) 3226 + uobj->live = 0; 3227 + 3228 + put_uobj_write(uobj); 3229 + if (ret) 3230 + return ret; 3231 + 3232 + idr_remove_uobj(&ib_uverbs_wq_idr, uobj); 3233 + 3234 + mutex_lock(&file->mutex); 3235 + list_del(&uobj->list); 3236 + mutex_unlock(&file->mutex); 3237 + 3238 + ib_uverbs_release_uevent(file, &obj->uevent); 3239 + resp.events_reported = obj->uevent.events_reported; 3240 + put_uobj(uobj); 3241 + 3242 + ret = ib_copy_to_udata(ucore, &resp, resp.response_length); 3243 + if (ret) 3244 + return ret; 3245 + 3246 + return 0; 3247 + } 3248 + 3249 + int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, 3250 + struct ib_device *ib_dev, 3251 + struct ib_udata *ucore, 3252 + struct ib_udata *uhw) 3253 + { 3254 + struct ib_uverbs_ex_modify_wq cmd = {}; 3255 + struct ib_wq *wq; 3256 + struct ib_wq_attr wq_attr = {}; 3257 + size_t required_cmd_sz; 3258 + int ret; 3259 + 3260 + required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); 3261 + if (ucore->inlen < required_cmd_sz) 3262 + return -EINVAL; 3263 + 3264 + if (ucore->inlen > sizeof(cmd) && 3265 + !ib_is_udata_cleared(ucore, sizeof(cmd), 3266 + ucore->inlen - sizeof(cmd))) 3267 + return -EOPNOTSUPP; 3268 + 3269 + ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3270 + if (ret) 3271 + return ret; 3272 + 3273 + if (!cmd.attr_mask) 3274 + return -EINVAL; 3275 + 3276 + if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE)) 3277 + return -EINVAL; 3278 + 3279 + wq = idr_read_wq(cmd.wq_handle, file->ucontext); 3280 + if (!wq) 3281 + return -EINVAL; 3282 + 3283 + wq_attr.curr_wq_state = cmd.curr_wq_state; 3284 + wq_attr.wq_state = cmd.wq_state; 3285 + ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); 3286 + put_wq_read(wq); 3287 + return ret; 3288 + } 3289 + 3290 + int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, 3291 + struct ib_device *ib_dev, 3292 + struct ib_udata *ucore, 3293 + struct ib_udata *uhw) 3294 + { 3295 + struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; 3296 + struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; 3297 + struct ib_uobject *uobj; 3298 + int err = 0; 3299 + struct ib_rwq_ind_table_init_attr init_attr = {}; 3300 + struct ib_rwq_ind_table *rwq_ind_tbl; 3301 + struct ib_wq **wqs = NULL; 3302 + u32 *wqs_handles = NULL; 3303 + struct ib_wq *wq = NULL; 3304 + int i, j, num_read_wqs; 3305 + u32 num_wq_handles; 3306 + u32 expected_in_size; 3307 + size_t required_cmd_sz_header; 3308 + size_t required_resp_len; 3309 + 3310 + required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); 3311 + required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); 3312 + 3313 + if (ucore->inlen < required_cmd_sz_header) 3314 + return -EINVAL; 3315 + 
3316 + if (ucore->outlen < required_resp_len) 3317 + return -ENOSPC; 3318 + 3319 + err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); 3320 + if (err) 3321 + return err; 3322 + 3323 + ucore->inbuf += required_cmd_sz_header; 3324 + ucore->inlen -= required_cmd_sz_header; 3325 + 3326 + if (cmd.comp_mask) 3327 + return -EOPNOTSUPP; 3328 + 3329 + if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) 3330 + return -EINVAL; 3331 + 3332 + num_wq_handles = 1 << cmd.log_ind_tbl_size; 3333 + expected_in_size = num_wq_handles * sizeof(__u32); 3334 + if (num_wq_handles == 1) 3335 + /* input size for wq handles is u64 aligned */ 3336 + expected_in_size += sizeof(__u32); 3337 + 3338 + if (ucore->inlen < expected_in_size) 3339 + return -EINVAL; 3340 + 3341 + if (ucore->inlen > expected_in_size && 3342 + !ib_is_udata_cleared(ucore, expected_in_size, 3343 + ucore->inlen - expected_in_size)) 3344 + return -EOPNOTSUPP; 3345 + 3346 + wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), 3347 + GFP_KERNEL); 3348 + if (!wqs_handles) 3349 + return -ENOMEM; 3350 + 3351 + err = ib_copy_from_udata(wqs_handles, ucore, 3352 + num_wq_handles * sizeof(__u32)); 3353 + if (err) 3354 + goto err_free; 3355 + 3356 + wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); 3357 + if (!wqs) { 3358 + err = -ENOMEM; 3359 + goto err_free; 3360 + } 3361 + 3362 + for (num_read_wqs = 0; num_read_wqs < num_wq_handles; 3363 + num_read_wqs++) { 3364 + wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext); 3365 + if (!wq) { 3366 + err = -EINVAL; 3367 + goto put_wqs; 3368 + } 3369 + 3370 + wqs[num_read_wqs] = wq; 3371 + } 3372 + 3373 + uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); 3374 + if (!uobj) { 3375 + err = -ENOMEM; 3376 + goto put_wqs; 3377 + } 3378 + 3379 + init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class); 3380 + down_write(&uobj->mutex); 3381 + init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; 3382 + init_attr.ind_tbl = wqs; 3383 + rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); 3384 + 3385 + if (IS_ERR(rwq_ind_tbl)) { 3386 + err = PTR_ERR(rwq_ind_tbl); 3387 + goto err_uobj; 3388 + } 3389 + 3390 + rwq_ind_tbl->ind_tbl = wqs; 3391 + rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; 3392 + rwq_ind_tbl->uobject = uobj; 3393 + uobj->object = rwq_ind_tbl; 3394 + rwq_ind_tbl->device = ib_dev; 3395 + atomic_set(&rwq_ind_tbl->usecnt, 0); 3396 + 3397 + for (i = 0; i < num_wq_handles; i++) 3398 + atomic_inc(&wqs[i]->usecnt); 3399 + 3400 + err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3401 + if (err) 3402 + goto destroy_ind_tbl; 3403 + 3404 + resp.ind_tbl_handle = uobj->id; 3405 + resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; 3406 + resp.response_length = required_resp_len; 3407 + 3408 + err = ib_copy_to_udata(ucore, 3409 + &resp, resp.response_length); 3410 + if (err) 3411 + goto err_copy; 3412 + 3413 + kfree(wqs_handles); 3414 + 3415 + for (j = 0; j < num_read_wqs; j++) 3416 + put_wq_read(wqs[j]); 3417 + 3418 + mutex_lock(&file->mutex); 3419 + list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list); 3420 + mutex_unlock(&file->mutex); 3421 + 3422 + uobj->live = 1; 3423 + 3424 + up_write(&uobj->mutex); 3425 + return 0; 3426 + 3427 + err_copy: 3428 + idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3429 + destroy_ind_tbl: 3430 + ib_destroy_rwq_ind_table(rwq_ind_tbl); 3431 + err_uobj: 3432 + put_uobj_write(uobj); 3433 + put_wqs: 3434 + for (j = 0; j < num_read_wqs; j++) 3435 + put_wq_read(wqs[j]); 3436 + err_free: 3437 + kfree(wqs_handles); 3438 + 
kfree(wqs); 3439 + return err; 3440 + } 3441 + 3442 + int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, 3443 + struct ib_device *ib_dev, 3444 + struct ib_udata *ucore, 3445 + struct ib_udata *uhw) 3446 + { 3447 + struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; 3448 + struct ib_rwq_ind_table *rwq_ind_tbl; 3449 + struct ib_uobject *uobj; 3450 + int ret; 3451 + struct ib_wq **ind_tbl; 3452 + size_t required_cmd_sz; 3453 + 3454 + required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); 3455 + 3456 + if (ucore->inlen < required_cmd_sz) 3457 + return -EINVAL; 3458 + 3459 + if (ucore->inlen > sizeof(cmd) && 3460 + !ib_is_udata_cleared(ucore, sizeof(cmd), 3461 + ucore->inlen - sizeof(cmd))) 3462 + return -EOPNOTSUPP; 3463 + 3464 + ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3465 + if (ret) 3466 + return ret; 3467 + 3468 + if (cmd.comp_mask) 3469 + return -EOPNOTSUPP; 3470 + 3471 + uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle, 3472 + file->ucontext); 3473 + if (!uobj) 3474 + return -EINVAL; 3475 + rwq_ind_tbl = uobj->object; 3476 + ind_tbl = rwq_ind_tbl->ind_tbl; 3477 + 3478 + ret = ib_destroy_rwq_ind_table(rwq_ind_tbl); 3479 + if (!ret) 3480 + uobj->live = 0; 3481 + 3482 + put_uobj_write(uobj); 3483 + 3484 + if (ret) 3485 + return ret; 3486 + 3487 + idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 3488 + 3489 + mutex_lock(&file->mutex); 3490 + list_del(&uobj->list); 3491 + mutex_unlock(&file->mutex); 3492 + 3493 + put_uobj(uobj); 3494 + kfree(ind_tbl); 3495 + return ret; 3131 3496 } 3132 3497 3133 3498 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
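One pattern worth calling out in the new extended verbs above: each handler computes a required_cmd_sz for the fields it understands, accepts a larger input only if the bytes it does not understand are zeroed (ib_is_udata_cleared()), copies at most sizeof(cmd) bytes, and rejects any unknown comp_mask bits. A condensed sketch of that validation is shown below, using the same helpers the handlers use; the command struct and field names are hypothetical, purely to illustrate the shape of the check.

#include <linux/types.h>
#include <rdma/ib_verbs.h>

/* Hypothetical extended-command layout, for illustration only. */
struct ex_cmd {
	__u32 comp_mask;
	__u32 handle;
};

static int ex_cmd_parse(struct ib_udata *ucore, struct ex_cmd *cmd)
{
	size_t required_cmd_sz = offsetof(struct ex_cmd, handle) +
				 sizeof(cmd->handle);
	int ret;

	/* Userspace must supply at least the fields this kernel needs. */
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	/* Newer userspace may pass a larger struct, but only if the part
	 * this kernel does not understand is all zeroes. */
	if (ucore->inlen > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 ucore->inlen - sizeof(*cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(cmd, ucore, min(sizeof(*cmd), ucore->inlen));
	if (ret)
		return ret;

	/* No comp_mask bits are defined yet in this sketch. */
	return cmd->comp_mask ? -EOPNOTSUPP : 0;
}
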
+38
drivers/infiniband/core/uverbs_main.c
··· 76 76 DEFINE_IDR(ib_uverbs_srq_idr); 77 77 DEFINE_IDR(ib_uverbs_xrcd_idr); 78 78 DEFINE_IDR(ib_uverbs_rule_idr); 79 + DEFINE_IDR(ib_uverbs_wq_idr); 80 + DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr); 79 81 80 82 static DEFINE_SPINLOCK(map_lock); 81 83 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); ··· 132 130 [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, 133 131 [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq, 134 132 [IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp, 133 + [IB_USER_VERBS_EX_CMD_CREATE_WQ] = ib_uverbs_ex_create_wq, 134 + [IB_USER_VERBS_EX_CMD_MODIFY_WQ] = ib_uverbs_ex_modify_wq, 135 + [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq, 136 + [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table, 137 + [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table, 135 138 }; 136 139 137 140 static void ib_uverbs_add_one(struct ib_device *device); ··· 270 263 } 271 264 ib_uverbs_release_uevent(file, &uqp->uevent); 272 265 kfree(uqp); 266 + } 267 + 268 + list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) { 269 + struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object; 270 + struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl; 271 + 272 + idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); 273 + ib_destroy_rwq_ind_table(rwq_ind_tbl); 274 + kfree(ind_tbl); 275 + kfree(uobj); 276 + } 277 + 278 + list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) { 279 + struct ib_wq *wq = uobj->object; 280 + struct ib_uwq_object *uwq = 281 + container_of(uobj, struct ib_uwq_object, uevent.uobject); 282 + 283 + idr_remove_uobj(&ib_uverbs_wq_idr, uobj); 284 + ib_destroy_wq(wq); 285 + ib_uverbs_release_uevent(file, &uwq->uevent); 286 + kfree(uwq); 273 287 } 274 288 275 289 list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { ··· 590 562 591 563 uobj = container_of(event->element.qp->uobject, 592 564 struct ib_uevent_object, uobject); 565 + 566 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, 567 + event->event, &uobj->event_list, 568 + &uobj->events_reported); 569 + } 570 + 571 + void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) 572 + { 573 + struct ib_uevent_object *uobj = container_of(event->element.wq->uobject, 574 + struct ib_uevent_object, uobject); 593 575 594 576 ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, 595 577 event->event, &uobj->event_list,
+161 -2
drivers/infiniband/core/verbs.c
··· 754 754 struct ib_qp *qp; 755 755 int ret; 756 756 757 + if (qp_init_attr->rwq_ind_tbl && 758 + (qp_init_attr->recv_cq || 759 + qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || 760 + qp_init_attr->cap.max_recv_sge)) 761 + return ERR_PTR(-EINVAL); 762 + 757 763 /* 758 764 * If the callers is using the RDMA API calculate the resources 759 765 * needed for the RDMA READ/WRITE operations. ··· 777 771 qp->real_qp = qp; 778 772 qp->uobject = NULL; 779 773 qp->qp_type = qp_init_attr->qp_type; 774 + qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; 780 775 781 776 atomic_set(&qp->usecnt, 0); 782 777 qp->mrs_used = 0; ··· 795 788 qp->srq = NULL; 796 789 } else { 797 790 qp->recv_cq = qp_init_attr->recv_cq; 798 - atomic_inc(&qp_init_attr->recv_cq->usecnt); 791 + if (qp_init_attr->recv_cq) 792 + atomic_inc(&qp_init_attr->recv_cq->usecnt); 799 793 qp->srq = qp_init_attr->srq; 800 794 if (qp->srq) 801 795 atomic_inc(&qp_init_attr->srq->usecnt); ··· 807 799 qp->xrcd = NULL; 808 800 809 801 atomic_inc(&pd->usecnt); 810 - atomic_inc(&qp_init_attr->send_cq->usecnt); 802 + if (qp_init_attr->send_cq) 803 + atomic_inc(&qp_init_attr->send_cq->usecnt); 804 + if (qp_init_attr->rwq_ind_tbl) 805 + atomic_inc(&qp->rwq_ind_tbl->usecnt); 811 806 812 807 if (qp_init_attr->cap.max_rdma_ctxs) { 813 808 ret = rdma_rw_init_mrs(qp, qp_init_attr); ··· 1290 1279 struct ib_pd *pd; 1291 1280 struct ib_cq *scq, *rcq; 1292 1281 struct ib_srq *srq; 1282 + struct ib_rwq_ind_table *ind_tbl; 1293 1283 int ret; 1294 1284 1295 1285 WARN_ON_ONCE(qp->mrs_used > 0); ··· 1305 1293 scq = qp->send_cq; 1306 1294 rcq = qp->recv_cq; 1307 1295 srq = qp->srq; 1296 + ind_tbl = qp->rwq_ind_tbl; 1308 1297 1309 1298 if (!qp->uobject) 1310 1299 rdma_rw_cleanup_mrs(qp); ··· 1320 1307 atomic_dec(&rcq->usecnt); 1321 1308 if (srq) 1322 1309 atomic_dec(&srq->usecnt); 1310 + if (ind_tbl) 1311 + atomic_dec(&ind_tbl->usecnt); 1323 1312 } 1324 1313 1325 1314 return ret; ··· 1568 1553 return xrcd->device->dealloc_xrcd(xrcd); 1569 1554 } 1570 1555 EXPORT_SYMBOL(ib_dealloc_xrcd); 1556 + 1557 + /** 1558 + * ib_create_wq - Creates a WQ associated with the specified protection 1559 + * domain. 1560 + * @pd: The protection domain associated with the WQ. 1561 + * @wq_init_attr: A list of initial attributes required to create the 1562 + * WQ. If WQ creation succeeds, then the attributes are updated to 1563 + * the actual capabilities of the created WQ. 1564 + * 1565 + * wq_init_attr->max_wr and wq_init_attr->max_sge determine 1566 + * the requested size of the WQ, and set to the actual values allocated 1567 + * on return. 1568 + * If ib_create_wq() succeeds, then max_wr and max_sge will always be 1569 + * at least as large as the requested values. 1570 + */ 1571 + struct ib_wq *ib_create_wq(struct ib_pd *pd, 1572 + struct ib_wq_init_attr *wq_attr) 1573 + { 1574 + struct ib_wq *wq; 1575 + 1576 + if (!pd->device->create_wq) 1577 + return ERR_PTR(-ENOSYS); 1578 + 1579 + wq = pd->device->create_wq(pd, wq_attr, NULL); 1580 + if (!IS_ERR(wq)) { 1581 + wq->event_handler = wq_attr->event_handler; 1582 + wq->wq_context = wq_attr->wq_context; 1583 + wq->wq_type = wq_attr->wq_type; 1584 + wq->cq = wq_attr->cq; 1585 + wq->device = pd->device; 1586 + wq->pd = pd; 1587 + wq->uobject = NULL; 1588 + atomic_inc(&pd->usecnt); 1589 + atomic_inc(&wq_attr->cq->usecnt); 1590 + atomic_set(&wq->usecnt, 0); 1591 + } 1592 + return wq; 1593 + } 1594 + EXPORT_SYMBOL(ib_create_wq); 1595 + 1596 + /** 1597 + * ib_destroy_wq - Destroys the specified WQ. 1598 + * @wq: The WQ to destroy. 
1599 + */ 1600 + int ib_destroy_wq(struct ib_wq *wq) 1601 + { 1602 + int err; 1603 + struct ib_cq *cq = wq->cq; 1604 + struct ib_pd *pd = wq->pd; 1605 + 1606 + if (atomic_read(&wq->usecnt)) 1607 + return -EBUSY; 1608 + 1609 + err = wq->device->destroy_wq(wq); 1610 + if (!err) { 1611 + atomic_dec(&pd->usecnt); 1612 + atomic_dec(&cq->usecnt); 1613 + } 1614 + return err; 1615 + } 1616 + EXPORT_SYMBOL(ib_destroy_wq); 1617 + 1618 + /** 1619 + * ib_modify_wq - Modifies the specified WQ. 1620 + * @wq: The WQ to modify. 1621 + * @wq_attr: On input, specifies the WQ attributes to modify. 1622 + * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ 1623 + * are being modified. 1624 + * On output, the current values of selected WQ attributes are returned. 1625 + */ 1626 + int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 1627 + u32 wq_attr_mask) 1628 + { 1629 + int err; 1630 + 1631 + if (!wq->device->modify_wq) 1632 + return -ENOSYS; 1633 + 1634 + err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); 1635 + return err; 1636 + } 1637 + EXPORT_SYMBOL(ib_modify_wq); 1638 + 1639 + /* 1640 + * ib_create_rwq_ind_table - Creates a RQ Indirection Table. 1641 + * @device: The device on which to create the rwq indirection table. 1642 + * @ib_rwq_ind_table_init_attr: A list of initial attributes required to 1643 + * create the Indirection Table. 1644 + * 1645 + * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less 1646 + * than the created ib_rwq_ind_table object and the caller is responsible 1647 + * for its memory allocation/free. 1648 + */ 1649 + struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 1650 + struct ib_rwq_ind_table_init_attr *init_attr) 1651 + { 1652 + struct ib_rwq_ind_table *rwq_ind_table; 1653 + int i; 1654 + u32 table_size; 1655 + 1656 + if (!device->create_rwq_ind_table) 1657 + return ERR_PTR(-ENOSYS); 1658 + 1659 + table_size = (1 << init_attr->log_ind_tbl_size); 1660 + rwq_ind_table = device->create_rwq_ind_table(device, 1661 + init_attr, NULL); 1662 + if (IS_ERR(rwq_ind_table)) 1663 + return rwq_ind_table; 1664 + 1665 + rwq_ind_table->ind_tbl = init_attr->ind_tbl; 1666 + rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; 1667 + rwq_ind_table->device = device; 1668 + rwq_ind_table->uobject = NULL; 1669 + atomic_set(&rwq_ind_table->usecnt, 0); 1670 + 1671 + for (i = 0; i < table_size; i++) 1672 + atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); 1673 + 1674 + return rwq_ind_table; 1675 + } 1676 + EXPORT_SYMBOL(ib_create_rwq_ind_table); 1677 + 1678 + /* 1679 + * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. 1680 + * @wq_ind_table: The Indirection Table to destroy. 1681 + */ 1682 + int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) 1683 + { 1684 + int err, i; 1685 + u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); 1686 + struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; 1687 + 1688 + if (atomic_read(&rwq_ind_table->usecnt)) 1689 + return -EBUSY; 1690 + 1691 + err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); 1692 + if (!err) { 1693 + for (i = 0; i < table_size; i++) 1694 + atomic_dec(&ind_tbl[i]->usecnt); 1695 + } 1696 + 1697 + return err; 1698 + } 1699 + EXPORT_SYMBOL(ib_destroy_rwq_ind_table); 1571 1700 1572 1701 struct ib_flow *ib_create_flow(struct ib_qp *qp, 1573 1702 struct ib_flow_attr *flow_attr,
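For kernel consumers, verbs.c pairs the uverbs plumbing above with in-kernel entry points: ib_create_wq()/ib_modify_wq()/ib_destroy_wq() and ib_create_rwq_ind_table()/ib_destroy_rwq_ind_table(). The sketch below shows how an in-kernel user might build a two-entry RSS indirection table with these calls. It assumes an existing PD and CQ and a caller-owned wqs[] array that outlives the indirection table (as the ib_create_rwq_ind_table() comment requires); the WQ type constant and the sizes are illustrative, not prescriptive.

#include <rdma/ib_verbs.h>

static struct ib_rwq_ind_table *example_rss_setup(struct ib_pd *pd,
						  struct ib_cq *cq,
						  struct ib_wq **wqs)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,	/* receive work queue */
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};
	struct ib_rwq_ind_table_init_attr ind_attr = {
		.log_ind_tbl_size = 1,	/* 1 << 1 == 2 WQs */
		.ind_tbl	  = wqs,	/* caller-owned, outlives the table */
	};
	struct ib_rwq_ind_table *ind_tbl;
	int i;

	for (i = 0; i < 2; i++) {
		wqs[i] = ib_create_wq(pd, &wq_attr);
		if (IS_ERR(wqs[i])) {
			struct ib_rwq_ind_table *err = ERR_CAST(wqs[i]);

			while (i--)
				ib_destroy_wq(wqs[i]);
			return err;
		}
	}

	ind_tbl = ib_create_rwq_ind_table(pd->device, &ind_attr);
	if (IS_ERR(ind_tbl)) {
		for (i = 0; i < 2; i++)
			ib_destroy_wq(wqs[i]);
	}
	return ind_tbl;
}
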
+13 -14
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 1183 1183 return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type); 1184 1184 } 1185 1185 1186 - static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf) 1187 - { 1188 - struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev, 1189 - ibdev.dev); 1190 - struct ethtool_drvinfo info; 1191 - struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev; 1192 - 1193 - PDBG("%s dev 0x%p\n", __func__, dev); 1194 - lldev->ethtool_ops->get_drvinfo(lldev, &info); 1195 - return sprintf(buf, "%s\n", info.fw_version); 1196 - } 1197 - 1198 1186 static ssize_t show_hca(struct device *dev, struct device_attribute *attr, 1199 1187 char *buf) 1200 1188 { ··· 1322 1334 } 1323 1335 1324 1336 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 1325 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 1326 1337 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 1327 1338 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 1328 1339 1329 1340 static struct device_attribute *iwch_class_attributes[] = { 1330 1341 &dev_attr_hw_rev, 1331 - &dev_attr_fw_ver, 1332 1342 &dev_attr_hca_type, 1333 1343 &dev_attr_board_id, 1334 1344 }; ··· 1346 1360 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; 1347 1361 1348 1362 return 0; 1363 + } 1364 + 1365 + static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str, 1366 + size_t str_len) 1367 + { 1368 + struct iwch_dev *iwch_dev = to_iwch_dev(ibdev); 1369 + struct ethtool_drvinfo info; 1370 + struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev; 1371 + 1372 + PDBG("%s dev 0x%p\n", __func__, iwch_dev); 1373 + lldev->ethtool_ops->get_drvinfo(lldev, &info); 1374 + snprintf(str, str_len, "%s", info.fw_version); 1349 1375 } 1350 1376 1351 1377 int iwch_register_device(struct iwch_dev *dev) ··· 1435 1437 dev->ibdev.get_hw_stats = iwch_get_mib; 1436 1438 dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; 1437 1439 dev->ibdev.get_port_immutable = iwch_port_immutable; 1440 + dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str; 1438 1441 1439 1442 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); 1440 1443 if (!dev->ibdev.iwcm)
+123 -58
drivers/infiniband/hw/cxgb4/cm.c
··· 294 294 return; 295 295 } 296 296 297 + static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size) 298 + { 299 + struct sk_buff *skb; 300 + unsigned int i; 301 + size_t len; 302 + 303 + len = roundup(sizeof(union cpl_wr_size), 16); 304 + for (i = 0; i < size; i++) { 305 + skb = alloc_skb(len, GFP_KERNEL); 306 + if (!skb) 307 + goto fail; 308 + skb_queue_tail(ep_skb_list, skb); 309 + } 310 + return 0; 311 + fail: 312 + skb_queue_purge(ep_skb_list); 313 + return -ENOMEM; 314 + } 315 + 297 316 static void *alloc_ep(int size, gfp_t gfp) 298 317 { 299 318 struct c4iw_ep_common *epc; ··· 403 384 if (ep->mpa_skb) 404 385 kfree_skb(ep->mpa_skb); 405 386 } 387 + if (!skb_queue_empty(&ep->com.ep_skb_list)) 388 + skb_queue_purge(&ep->com.ep_skb_list); 406 389 kfree(ep); 407 390 } 408 391 ··· 641 620 } 642 621 } 643 622 644 - static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 623 + static int send_flowc(struct c4iw_ep *ep) 645 624 { 646 - unsigned int flowclen = 80; 647 625 struct fw_flowc_wr *flowc; 626 + struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 648 627 int i; 649 628 u16 vlan = ep->l2t->vlan; 650 629 int nparams; 630 + 631 + if (WARN_ON(!skb)) 632 + return -ENOMEM; 651 633 652 634 if (vlan == CPL_L2T_VLAN_NONE) 653 635 nparams = 8; 654 636 else 655 637 nparams = 9; 656 638 657 - skb = get_skb(skb, flowclen, GFP_KERNEL); 658 - flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); 639 + flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN); 659 640 660 641 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 661 642 FW_FLOWC_WR_NPARAMS_V(nparams)); 662 - flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen, 643 + flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN, 663 644 16)) | FW_WR_FLOWID_V(ep->hwtid)); 664 645 665 646 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; ··· 702 679 return c4iw_ofld_send(&ep->com.dev->rdev, skb); 703 680 } 704 681 705 - static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) 682 + static int send_halfclose(struct c4iw_ep *ep) 706 683 { 707 684 struct cpl_close_con_req *req; 708 - struct sk_buff *skb; 685 + struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 709 686 int wrlen = roundup(sizeof *req, 16); 710 687 711 688 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 712 - skb = get_skb(NULL, wrlen, gfp); 713 - if (!skb) { 714 - printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); 689 + if (WARN_ON(!skb)) 715 690 return -ENOMEM; 716 - } 691 + 717 692 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 718 693 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 719 694 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); ··· 722 701 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 723 702 } 724 703 725 - static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 704 + static int send_abort(struct c4iw_ep *ep) 726 705 { 727 706 struct cpl_abort_req *req; 728 707 int wrlen = roundup(sizeof *req, 16); 708 + struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list); 729 709 730 710 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 731 - skb = get_skb(skb, wrlen, gfp); 732 - if (!skb) { 733 - printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 734 - __func__); 711 + if (WARN_ON(!req_skb)) 735 712 return -ENOMEM; 736 - } 737 - set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 738 - t4_set_arp_err_handler(skb, ep, abort_arp_failure); 739 - req = (struct cpl_abort_req *) skb_put(skb, wrlen); 713 + 714 + set_wr_txq(req_skb, 
CPL_PRIORITY_DATA, ep->txq_idx); 715 + t4_set_arp_err_handler(req_skb, ep, abort_arp_failure); 716 + req = (struct cpl_abort_req *)skb_put(req_skb, wrlen); 740 717 memset(req, 0, wrlen); 741 718 INIT_TP_WR(req, ep->hwtid); 742 719 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 743 720 req->cmd = CPL_ABORT_SEND_RST; 744 - return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 721 + return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t); 745 722 } 746 723 747 724 static void best_mtu(const unsigned short *mtus, unsigned short mtu, ··· 1011 992 1012 993 mpa = (struct mpa_message *)(req + 1); 1013 994 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 1014 - mpa->flags = (crc_enabled ? MPA_CRC : 0) | 1015 - (markers_enabled ? MPA_MARKERS : 0) | 1016 - (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 995 + 996 + mpa->flags = 0; 997 + if (crc_enabled) 998 + mpa->flags |= MPA_CRC; 999 + if (markers_enabled) { 1000 + mpa->flags |= MPA_MARKERS; 1001 + ep->mpa_attr.recv_marker_enabled = 1; 1002 + } else { 1003 + ep->mpa_attr.recv_marker_enabled = 0; 1004 + } 1005 + if (mpa_rev_to_use == 2) 1006 + mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1007 + 1017 1008 mpa->private_data_size = htons(ep->plen); 1018 1009 mpa->revision = mpa_rev_to_use; 1019 1010 if (mpa_rev_to_use == 1) { ··· 1198 1169 mpa = (struct mpa_message *)(req + 1); 1199 1170 memset(mpa, 0, sizeof(*mpa)); 1200 1171 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1201 - mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 1202 - (markers_enabled ? MPA_MARKERS : 0); 1172 + mpa->flags = 0; 1173 + if (ep->mpa_attr.crc_enabled) 1174 + mpa->flags |= MPA_CRC; 1175 + if (ep->mpa_attr.recv_marker_enabled) 1176 + mpa->flags |= MPA_MARKERS; 1203 1177 mpa->revision = ep->mpa_attr.version; 1204 1178 mpa->private_data_size = htons(plen); 1205 1179 ··· 1280 1248 set_bit(ACT_ESTAB, &ep->com.history); 1281 1249 1282 1250 /* start MPA negotiation */ 1283 - ret = send_flowc(ep, NULL); 1251 + ret = send_flowc(ep); 1284 1252 if (ret) 1285 1253 goto err; 1286 1254 if (ep->retry_with_mpa_v1) ··· 1587 1555 */ 1588 1556 __state_set(&ep->com, FPDU_MODE); 1589 1557 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1590 - ep->mpa_attr.recv_marker_enabled = markers_enabled; 1591 1558 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1592 1559 ep->mpa_attr.version = mpa->revision; 1593 1560 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; ··· 2035 2004 } 2036 2005 2037 2006 /* 2038 - * Return whether a failed active open has allocated a TID 2007 + * Some of the error codes above implicitly indicate that there is no TID 2008 + * allocated with the result of an ACT_OPEN. We use this predicate to make 2009 + * that explicit. 2039 2010 */ 2040 2011 static inline int act_open_has_tid(int status) 2041 2012 { 2042 - return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 2043 - status != CPL_ERR_ARP_MISS; 2013 + return (status != CPL_ERR_TCAM_PARITY && 2014 + status != CPL_ERR_TCAM_MISS && 2015 + status != CPL_ERR_TCAM_FULL && 2016 + status != CPL_ERR_CONN_EXIST_SYNRECV && 2017 + status != CPL_ERR_CONN_EXIST); 2044 2018 } 2045 2019 2046 2020 /* Returns whether a CPL status conveys negative advice. 
··· 2166 2130 static int c4iw_reconnect(struct c4iw_ep *ep) 2167 2131 { 2168 2132 int err = 0; 2133 + int size = 0; 2169 2134 struct sockaddr_in *laddr = (struct sockaddr_in *) 2170 2135 &ep->com.cm_id->m_local_addr; 2171 2136 struct sockaddr_in *raddr = (struct sockaddr_in *) ··· 2181 2144 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 2182 2145 init_timer(&ep->timer); 2183 2146 c4iw_init_wr_wait(&ep->com.wr_wait); 2147 + 2148 + /* When MPA revision is different on nodes, the node with MPA_rev=2 2149 + * tries to reconnect with MPA_rev 1 for the same EP through 2150 + * c4iw_reconnect(), where the same EP is assigned with new tid for 2151 + * further connection establishment. As we are using the same EP pointer 2152 + * for reconnect, few skbs are used during the previous c4iw_connect(), 2153 + * which leaves the EP with inadequate skbs for further 2154 + * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty 2155 + * skb_list() during peer_abort(). Allocate skbs which is already used. 2156 + */ 2157 + size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list)); 2158 + if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) { 2159 + err = -ENOMEM; 2160 + goto fail1; 2161 + } 2184 2162 2185 2163 /* 2186 2164 * Allocate an active TID to initiate a TCP connection. ··· 2262 2210 * response of 1st connect request. 2263 2211 */ 2264 2212 connect_reply_upcall(ep, -ECONNRESET); 2213 + fail1: 2265 2214 c4iw_put_ep(&ep->com); 2266 2215 out: 2267 2216 return err; ··· 2629 2576 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2630 2577 child_ep->mtu = peer_mss + hdrs; 2631 2578 2579 + skb_queue_head_init(&child_ep->com.ep_skb_list); 2580 + if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF)) 2581 + goto fail; 2582 + 2632 2583 state_set(&child_ep->com, CONNECTING); 2633 2584 child_ep->com.dev = dev; 2634 2585 child_ep->com.cm_id = NULL; ··· 2697 2640 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2698 2641 } 2699 2642 goto out; 2643 + fail: 2644 + c4iw_put_ep(&child_ep->com); 2700 2645 reject: 2701 2646 reject_cr(dev, hwtid, skb); 2702 2647 if (parent_ep) ··· 2729 2670 ep->com.state = MPA_REQ_WAIT; 2730 2671 start_ep_timer(ep); 2731 2672 set_bit(PASS_ESTAB, &ep->com.history); 2732 - ret = send_flowc(ep, skb); 2673 + ret = send_flowc(ep); 2733 2674 mutex_unlock(&ep->com.mutex); 2734 2675 if (ret) 2735 2676 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); ··· 2930 2871 } 2931 2872 mutex_unlock(&ep->com.mutex); 2932 2873 2933 - rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2934 - if (!rpl_skb) { 2935 - printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 2936 - __func__); 2874 + rpl_skb = skb_dequeue(&ep->com.ep_skb_list); 2875 + if (WARN_ON(!rpl_skb)) { 2937 2876 release = 1; 2938 2877 goto out; 2939 2878 } ··· 3082 3025 3083 3026 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 3084 3027 { 3085 - int err = 0; 3086 - int disconnect = 0; 3028 + int abort; 3087 3029 struct c4iw_ep *ep = to_ep(cm_id); 3030 + 3088 3031 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 3089 3032 3090 3033 mutex_lock(&ep->com.mutex); ··· 3095 3038 } 3096 3039 set_bit(ULP_REJECT, &ep->com.history); 3097 3040 if (mpa_rev == 0) 3098 - disconnect = 2; 3099 - else { 3100 - err = send_mpa_reject(ep, pdata, pdata_len); 3101 - disconnect = 1; 3102 - } 3041 + abort = 1; 3042 + else 3043 + abort = send_mpa_reject(ep, pdata, pdata_len); 3103 3044 mutex_unlock(&ep->com.mutex); 3104 - if (disconnect) { 3105 - stop_ep_timer(ep); 3106 - err = c4iw_ep_disconnect(ep, disconnect == 
2, GFP_KERNEL); 3107 - } 3045 + 3046 + stop_ep_timer(ep); 3047 + c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 3108 3048 c4iw_put_ep(&ep->com); 3109 3049 return 0; 3110 3050 } ··· 3302 3248 err = -ENOMEM; 3303 3249 goto out; 3304 3250 } 3251 + 3252 + skb_queue_head_init(&ep->com.ep_skb_list); 3253 + if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { 3254 + err = -ENOMEM; 3255 + goto fail1; 3256 + } 3257 + 3305 3258 init_timer(&ep->timer); 3306 3259 ep->plen = conn_param->private_data_len; 3307 3260 if (ep->plen) ··· 3327 3266 if (!ep->com.qp) { 3328 3267 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 3329 3268 err = -EINVAL; 3330 - goto fail1; 3269 + goto fail2; 3331 3270 } 3332 3271 ref_qp(ep); 3333 3272 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, ··· 3340 3279 if (ep->atid == -1) { 3341 3280 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 3342 3281 err = -ENOMEM; 3343 - goto fail1; 3282 + goto fail2; 3344 3283 } 3345 3284 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 3346 3285 ··· 3364 3303 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { 3365 3304 err = pick_local_ipaddrs(dev, cm_id); 3366 3305 if (err) 3367 - goto fail1; 3306 + goto fail2; 3368 3307 } 3369 3308 3370 3309 /* find a route */ ··· 3384 3323 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3385 3324 err = pick_local_ip6addrs(dev, cm_id); 3386 3325 if (err) 3387 - goto fail1; 3326 + goto fail2; 3388 3327 } 3389 3328 3390 3329 /* find a route */ ··· 3400 3339 if (!ep->dst) { 3401 3340 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 3402 3341 err = -EHOSTUNREACH; 3403 - goto fail2; 3342 + goto fail3; 3404 3343 } 3405 3344 3406 3345 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, 3407 3346 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); 3408 3347 if (err) { 3409 3348 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 3410 - goto fail3; 3349 + goto fail4; 3411 3350 } 3412 3351 3413 3352 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", ··· 3423 3362 goto out; 3424 3363 3425 3364 cxgb4_l2t_release(ep->l2t); 3426 - fail3: 3365 + fail4: 3427 3366 dst_release(ep->dst); 3428 - fail2: 3367 + fail3: 3429 3368 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3430 3369 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3431 - fail1: 3370 + fail2: 3371 + skb_queue_purge(&ep->com.ep_skb_list); 3432 3372 deref_cm_id(&ep->com); 3373 + fail1: 3433 3374 c4iw_put_ep(&ep->com); 3434 3375 out: 3435 3376 return err; ··· 3524 3461 err = -ENOMEM; 3525 3462 goto fail1; 3526 3463 } 3464 + skb_queue_head_init(&ep->com.ep_skb_list); 3527 3465 PDBG("%s ep %p\n", __func__, ep); 3528 3466 ep->com.cm_id = cm_id; 3529 3467 ref_cm_id(&ep->com); ··· 3641 3577 case MPA_REQ_RCVD: 3642 3578 case MPA_REP_SENT: 3643 3579 case FPDU_MODE: 3580 + case CONNECTING: 3644 3581 close = 1; 3645 3582 if (abrupt) 3646 3583 ep->com.state = ABORTING; ··· 3676 3611 if (abrupt) { 3677 3612 set_bit(EP_DISC_ABORT, &ep->com.history); 3678 3613 close_complete_upcall(ep, -ECONNRESET); 3679 - ret = send_abort(ep, NULL, gfp); 3614 + ret = send_abort(ep); 3680 3615 } else { 3681 3616 set_bit(EP_DISC_CLOSE, &ep->com.history); 3682 - ret = send_halfclose(ep, gfp); 3617 + ret = send_halfclose(ep); 3683 3618 } 3684 3619 if (ret) { 3685 3620 set_bit(EP_DISC_FAIL, &ep->com.history);
+25 -17
drivers/infiniband/hw/cxgb4/cq.c
··· 33 33 #include "iw_cxgb4.h" 34 34 35 35 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, 36 - struct c4iw_dev_ucontext *uctx) 36 + struct c4iw_dev_ucontext *uctx, struct sk_buff *skb) 37 37 { 38 38 struct fw_ri_res_wr *res_wr; 39 39 struct fw_ri_res *res; 40 40 int wr_len; 41 41 struct c4iw_wr_wait wr_wait; 42 - struct sk_buff *skb; 43 42 int ret; 44 43 45 44 wr_len = sizeof *res_wr + sizeof *res; 46 - skb = alloc_skb(wr_len, GFP_KERNEL); 47 - if (!skb) 48 - return -ENOMEM; 49 45 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 50 46 51 47 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); ··· 859 863 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context) 860 864 : NULL; 861 865 destroy_cq(&chp->rhp->rdev, &chp->cq, 862 - ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx); 866 + ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, 867 + chp->destroy_skb); 868 + chp->destroy_skb = NULL; 863 869 kfree(chp); 864 870 return 0; 865 871 } ··· 877 879 struct c4iw_cq *chp; 878 880 struct c4iw_create_cq_resp uresp; 879 881 struct c4iw_ucontext *ucontext = NULL; 880 - int ret; 882 + int ret, wr_len; 881 883 size_t memsize, hwentries; 882 884 struct c4iw_mm_entry *mm, *mm2; 883 885 ··· 893 895 chp = kzalloc(sizeof(*chp), GFP_KERNEL); 894 896 if (!chp) 895 897 return ERR_PTR(-ENOMEM); 898 + 899 + wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res); 900 + chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); 901 + if (!chp->destroy_skb) { 902 + ret = -ENOMEM; 903 + goto err1; 904 + } 896 905 897 906 if (ib_context) 898 907 ucontext = to_c4iw_ucontext(ib_context); ··· 941 936 ret = create_cq(&rhp->rdev, &chp->cq, 942 937 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 943 938 if (ret) 944 - goto err1; 939 + goto err2; 945 940 946 941 chp->rhp = rhp; 947 942 chp->cq.size--; /* status page */ ··· 952 947 init_waitqueue_head(&chp->wait); 953 948 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); 954 949 if (ret) 955 - goto err2; 950 + goto err3; 956 951 957 952 if (ucontext) { 958 953 mm = kmalloc(sizeof *mm, GFP_KERNEL); 959 954 if (!mm) 960 - goto err3; 955 + goto err4; 961 956 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); 962 957 if (!mm2) 963 - goto err4; 958 + goto err5; 964 959 965 960 uresp.qid_mask = rhp->rdev.cqmask; 966 961 uresp.cqid = chp->cq.cqid; ··· 975 970 ret = ib_copy_to_udata(udata, &uresp, 976 971 sizeof(uresp) - sizeof(uresp.reserved)); 977 972 if (ret) 978 - goto err5; 973 + goto err6; 979 974 980 975 mm->key = uresp.key; 981 976 mm->addr = virt_to_phys(chp->cq.queue); ··· 991 986 __func__, chp->cq.cqid, chp, chp->cq.size, 992 987 chp->cq.memsize, (unsigned long long) chp->cq.dma_addr); 993 988 return &chp->ibcq; 994 - err5: 989 + err6: 995 990 kfree(mm2); 996 - err4: 991 + err5: 997 992 kfree(mm); 998 - err3: 993 + err4: 999 994 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid); 1000 - err2: 995 + err3: 1001 996 destroy_cq(&chp->rhp->rdev, &chp->cq, 1002 - ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 997 + ucontext ? &ucontext->uctx : &rhp->rdev.uctx, 998 + chp->destroy_skb); 999 + err2: 1000 + kfree_skb(chp->destroy_skb); 1003 1001 err1: 1004 1002 kfree(chp); 1005 1003 return ERR_PTR(ret);
+1 -1
drivers/infiniband/hw/cxgb4/device.c
··· 317 317 idr_for_each(&qpd->devp->qpidr, count_idrs, &count); 318 318 spin_unlock_irq(&qpd->devp->lock); 319 319 320 - qpd->bufsize = count * 128; 320 + qpd->bufsize = count * 180; 321 321 qpd->buf = vmalloc(qpd->bufsize); 322 322 if (!qpd->buf) { 323 323 kfree(qpd);
+22
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 384 384 struct ib_mr ibmr; 385 385 struct ib_umem *umem; 386 386 struct c4iw_dev *rhp; 387 + struct sk_buff *dereg_skb; 387 388 u64 kva; 388 389 struct tpt_attributes attr; 389 390 u64 *mpl; ··· 401 400 struct c4iw_mw { 402 401 struct ib_mw ibmw; 403 402 struct c4iw_dev *rhp; 403 + struct sk_buff *dereg_skb; 404 404 u64 kva; 405 405 struct tpt_attributes attr; 406 406 }; ··· 414 412 struct c4iw_cq { 415 413 struct ib_cq ibcq; 416 414 struct c4iw_dev *rhp; 415 + struct sk_buff *destroy_skb; 417 416 struct t4_cq cq; 418 417 spinlock_t lock; 419 418 spinlock_t comp_handler_lock; ··· 792 789 CM_ID_DEREFED = 28, 793 790 }; 794 791 792 + enum conn_pre_alloc_buffers { 793 + CN_ABORT_REQ_BUF, 794 + CN_ABORT_RPL_BUF, 795 + CN_CLOSE_CON_REQ_BUF, 796 + CN_DESTROY_BUF, 797 + CN_FLOWC_BUF, 798 + CN_MAX_CON_BUF 799 + }; 800 + 801 + #define FLOWC_LEN 80 802 + union cpl_wr_size { 803 + struct cpl_abort_req abrt_req; 804 + struct cpl_abort_rpl abrt_rpl; 805 + struct fw_ri_wr ri_req; 806 + struct cpl_close_con_req close_req; 807 + char flowc_buf[FLOWC_LEN]; 808 + }; 809 + 795 810 struct c4iw_ep_common { 796 811 struct iw_cm_id *cm_id; 797 812 struct c4iw_qp *qp; 798 813 struct c4iw_dev *dev; 814 + struct sk_buff_head ep_skb_list; 799 815 enum c4iw_ep_state state; 800 816 struct kref kref; 801 817 struct mutex mutex;
+74 -37
drivers/infiniband/hw/cxgb4/mem.c
··· 59 59 } 60 60 61 61 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, 62 - u32 len, dma_addr_t data, int wait) 62 + u32 len, dma_addr_t data, 63 + int wait, struct sk_buff *skb) 63 64 { 64 - struct sk_buff *skb; 65 65 struct ulp_mem_io *req; 66 66 struct ulptx_sgl *sgl; 67 67 u8 wr_len; ··· 74 74 c4iw_init_wr_wait(&wr_wait); 75 75 wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16); 76 76 77 - skb = alloc_skb(wr_len, GFP_KERNEL); 78 - if (!skb) 79 - return -ENOMEM; 77 + if (!skb) { 78 + skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); 79 + if (!skb) 80 + return -ENOMEM; 81 + } 80 82 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 81 83 82 84 req = (struct ulp_mem_io *)__skb_put(skb, wr_len); ··· 110 108 } 111 109 112 110 static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, 113 - void *data) 111 + void *data, struct sk_buff *skb) 114 112 { 115 - struct sk_buff *skb; 116 113 struct ulp_mem_io *req; 117 114 struct ulptx_idata *sc; 118 115 u8 wr_len, *to_dp, *from_dp; ··· 135 134 wr_len = roundup(sizeof *req + sizeof *sc + 136 135 roundup(copy_len, T4_ULPTX_MIN_IO), 16); 137 136 138 - skb = alloc_skb(wr_len, GFP_KERNEL); 139 - if (!skb) 140 - return -ENOMEM; 137 + if (!skb) { 138 + skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); 139 + if (!skb) 140 + return -ENOMEM; 141 + } 141 142 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 142 143 143 144 req = (struct ulp_mem_io *)__skb_put(skb, wr_len); ··· 176 173 memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO - 177 174 (copy_len % T4_ULPTX_MIN_IO)); 178 175 ret = c4iw_ofld_send(rdev, skb); 176 + skb = NULL; 179 177 if (ret) 180 178 return ret; 181 179 len -= C4IW_MAX_INLINE_SIZE; ··· 186 182 return ret; 187 183 } 188 184 189 - static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) 185 + static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, 186 + void *data, struct sk_buff *skb) 190 187 { 191 188 u32 remain = len; 192 189 u32 dmalen; ··· 210 205 dmalen = T4_ULPTX_MAX_DMA; 211 206 remain -= dmalen; 212 207 ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr, 213 - !remain); 208 + !remain, skb); 214 209 if (ret) 215 210 goto out; 216 211 addr += dmalen >> 5; ··· 218 213 daddr += dmalen; 219 214 } 220 215 if (remain) 221 - ret = _c4iw_write_mem_inline(rdev, addr, remain, data); 216 + ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb); 222 217 out: 223 218 dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE); 224 219 return ret; ··· 229 224 * If data is NULL, clear len byte of memory to zero. 
230 225 */ 231 226 static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, 232 - void *data) 227 + void *data, struct sk_buff *skb) 233 228 { 234 229 if (is_t5(rdev->lldi.adapter_type) && use_dsgl) { 235 230 if (len > inline_threshold) { 236 - if (_c4iw_write_mem_dma(rdev, addr, len, data)) { 231 + if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) { 237 232 printk_ratelimited(KERN_WARNING 238 233 "%s: dma map" 239 234 " failure (non fatal)\n", 240 235 pci_name(rdev->lldi.pdev)); 241 236 return _c4iw_write_mem_inline(rdev, addr, len, 242 - data); 243 - } else 237 + data, skb); 238 + } else { 244 239 return 0; 240 + } 245 241 } else 246 - return _c4iw_write_mem_inline(rdev, addr, len, data); 242 + return _c4iw_write_mem_inline(rdev, addr, 243 + len, data, skb); 247 244 } else 248 - return _c4iw_write_mem_inline(rdev, addr, len, data); 245 + return _c4iw_write_mem_inline(rdev, addr, len, data, skb); 249 246 } 250 247 251 248 /* ··· 260 253 u32 *stag, u8 stag_state, u32 pdid, 261 254 enum fw_ri_stag_type type, enum fw_ri_mem_perms perm, 262 255 int bind_enabled, u32 zbva, u64 to, 263 - u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr) 256 + u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr, 257 + struct sk_buff *skb) 264 258 { 265 259 int err; 266 260 struct fw_ri_tpte tpt; ··· 315 307 } 316 308 err = write_adapter_mem(rdev, stag_idx + 317 309 (rdev->lldi.vr->stag.start >> 5), 318 - sizeof(tpt), &tpt); 310 + sizeof(tpt), &tpt, skb); 319 311 320 312 if (reset_tpt_entry) { 321 313 c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); ··· 335 327 __func__, pbl_addr, rdev->lldi.vr->pbl.start, 336 328 pbl_size); 337 329 338 - err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl); 330 + err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL); 339 331 return err; 340 332 } 341 333 342 334 static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size, 343 - u32 pbl_addr) 335 + u32 pbl_addr, struct sk_buff *skb) 344 336 { 345 337 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 346 - pbl_size, pbl_addr); 338 + pbl_size, pbl_addr, skb); 347 339 } 348 340 349 341 static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid) 350 342 { 351 343 *stag = T4_STAG_UNSET; 352 344 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0, 353 - 0UL, 0, 0, 0, 0); 345 + 0UL, 0, 0, 0, 0, NULL); 354 346 } 355 347 356 - static int deallocate_window(struct c4iw_rdev *rdev, u32 stag) 348 + static int deallocate_window(struct c4iw_rdev *rdev, u32 stag, 349 + struct sk_buff *skb) 357 350 { 358 351 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0, 359 - 0); 352 + 0, skb); 360 353 } 361 354 362 355 static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid, ··· 365 356 { 366 357 *stag = T4_STAG_UNSET; 367 358 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0, 368 - 0UL, 0, 0, pbl_size, pbl_addr); 359 + 0UL, 0, 0, pbl_size, pbl_addr, NULL); 369 360 } 370 361 371 362 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) ··· 392 383 mhp->attr.mw_bind_enable, mhp->attr.zbva, 393 384 mhp->attr.va_fbo, mhp->attr.len ? 
394 385 mhp->attr.len : -1, shift - 12, 395 - mhp->attr.pbl_size, mhp->attr.pbl_addr); 386 + mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL); 396 387 if (ret) 397 388 return ret; 398 389 399 390 ret = finish_mem_reg(mhp, stag); 400 - if (ret) 391 + if (ret) { 401 392 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, 402 - mhp->attr.pbl_addr); 393 + mhp->attr.pbl_addr, mhp->dereg_skb); 394 + mhp->dereg_skb = NULL; 395 + } 403 396 return ret; 404 397 } 405 398 ··· 434 423 if (!mhp) 435 424 return ERR_PTR(-ENOMEM); 436 425 426 + mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); 427 + if (!mhp->dereg_skb) { 428 + ret = -ENOMEM; 429 + goto err0; 430 + } 431 + 437 432 mhp->rhp = rhp; 438 433 mhp->attr.pdid = php->pdid; 439 434 mhp->attr.perms = c4iw_ib_to_tpt_access(acc); ··· 452 435 453 436 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, 454 437 FW_RI_STAG_NSMR, mhp->attr.perms, 455 - mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0); 438 + mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0, 439 + NULL); 456 440 if (ret) 457 441 goto err1; 458 442 ··· 463 445 return &mhp->ibmr; 464 446 err2: 465 447 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, 466 - mhp->attr.pbl_addr); 448 + mhp->attr.pbl_addr, mhp->dereg_skb); 467 449 err1: 450 + kfree_skb(mhp->dereg_skb); 451 + err0: 468 452 kfree(mhp); 469 453 return ERR_PTR(ret); 470 454 } ··· 501 481 if (!mhp) 502 482 return ERR_PTR(-ENOMEM); 503 483 484 + mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); 485 + if (!mhp->dereg_skb) { 486 + kfree(mhp); 487 + return ERR_PTR(-ENOMEM); 488 + } 489 + 504 490 mhp->rhp = rhp; 505 491 506 492 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); 507 493 if (IS_ERR(mhp->umem)) { 508 494 err = PTR_ERR(mhp->umem); 495 + kfree_skb(mhp->dereg_skb); 509 496 kfree(mhp); 510 497 return ERR_PTR(err); 511 498 } ··· 577 550 578 551 err: 579 552 ib_umem_release(mhp->umem); 553 + kfree_skb(mhp->dereg_skb); 580 554 kfree(mhp); 581 555 return ERR_PTR(err); 582 556 } ··· 600 572 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); 601 573 if (!mhp) 602 574 return ERR_PTR(-ENOMEM); 575 + 576 + mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL); 577 + if (!mhp->dereg_skb) { 578 + kfree(mhp); 579 + return ERR_PTR(-ENOMEM); 580 + } 581 + 603 582 ret = allocate_window(&rhp->rdev, &stag, php->pdid); 604 583 if (ret) { 584 + kfree(mhp->dereg_skb); 605 585 kfree(mhp); 606 586 return ERR_PTR(ret); 607 587 } ··· 620 584 mmid = (stag) >> 8; 621 585 mhp->ibmw.rkey = stag; 622 586 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { 623 - deallocate_window(&rhp->rdev, mhp->attr.stag); 587 + deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb); 588 + kfree(mhp->dereg_skb); 624 589 kfree(mhp); 625 590 return ERR_PTR(-ENOMEM); 626 591 } ··· 639 602 rhp = mhp->rhp; 640 603 mmid = (mw->rkey) >> 8; 641 604 remove_handle(rhp, &rhp->mmidr, mmid); 642 - deallocate_window(&rhp->rdev, mhp->attr.stag); 605 + deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb); 643 606 kfree(mhp); 644 607 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp); 645 608 return 0; ··· 703 666 return &(mhp->ibmr); 704 667 err3: 705 668 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, 706 - mhp->attr.pbl_addr); 669 + mhp->attr.pbl_addr, mhp->dereg_skb); 707 670 err2: 708 671 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, 709 672 mhp->attr.pbl_size << 3); ··· 754 717 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev, 755 718 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr); 756 719 dereg_mem(&rhp->rdev, 
mhp->attr.stag, mhp->attr.pbl_size, 757 - mhp->attr.pbl_addr); 720 + mhp->attr.pbl_addr, mhp->dereg_skb); 758 721 if (mhp->attr.pbl_size) 759 722 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, 760 723 mhp->attr.pbl_size << 3);
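Note on the cxgb4 mem.c hunks above: write_adapter_mem() and write_tpt_entry() now take a caller-supplied sk_buff, so the skb needed to destroy an MR/MW can be reserved at creation time (mhp->dereg_skb) instead of being allocated on the destroy path, where an allocation failure is hard to recover from. A minimal sketch of that pre-allocate-then-consume pattern; my_obj and my_destroy_fw_cmd are illustrative names, not taken from the patch:

struct my_obj {
	struct sk_buff *dereg_skb;	/* reserved while allocation may still fail */
};

static int my_create(struct my_obj *obj)
{
	obj->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!obj->dereg_skb)
		return -ENOMEM;		/* fail early, at create time */
	return 0;
}

static void my_destroy(struct my_obj *obj)
{
	/* the firmware work request consumes the reserved skb;
	 * nothing on this path can fail for lack of memory
	 */
	my_destroy_fw_cmd(obj->dereg_skb);
	obj->dereg_skb = NULL;
}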
+15 -16
drivers/infiniband/hw/cxgb4/provider.c
··· 409 409 CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type)); 410 410 } 411 411 412 - static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, 413 - char *buf) 414 - { 415 - struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, 416 - ibdev.dev); 417 - PDBG("%s dev 0x%p\n", __func__, dev); 418 - 419 - return sprintf(buf, "%u.%u.%u.%u\n", 420 - FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers), 421 - FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers), 422 - FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers), 423 - FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers)); 424 - } 425 - 426 412 static ssize_t show_hca(struct device *dev, struct device_attribute *attr, 427 413 char *buf) 428 414 { ··· 488 502 } 489 503 490 504 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 491 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 492 505 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 493 506 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 494 507 495 508 static struct device_attribute *c4iw_class_attributes[] = { 496 509 &dev_attr_hw_rev, 497 - &dev_attr_fw_ver, 498 510 &dev_attr_hca_type, 499 511 &dev_attr_board_id, 500 512 }; ··· 512 528 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; 513 529 514 530 return 0; 531 + } 532 + 533 + static void get_dev_fw_str(struct ib_device *dev, char *str, 534 + size_t str_len) 535 + { 536 + struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, 537 + ibdev); 538 + PDBG("%s dev 0x%p\n", __func__, dev); 539 + 540 + snprintf(str, str_len, "%u.%u.%u.%u", 541 + FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers), 542 + FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers), 543 + FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers), 544 + FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers)); 515 545 } 516 546 517 547 int c4iw_register_device(struct c4iw_dev *dev) ··· 603 605 dev->ibdev.get_hw_stats = c4iw_get_mib; 604 606 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; 605 607 dev->ibdev.get_port_immutable = c4iw_port_immutable; 608 + dev->ibdev.get_dev_fw_str = get_dev_fw_str; 606 609 dev->ibdev.drain_sq = c4iw_drain_sq; 607 610 dev->ibdev.drain_rq = c4iw_drain_rq; 608 611
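The provider.c hunk is the driver side of the fw_ver consolidation: the per-driver fw_ver sysfs attribute is dropped and the driver instead fills in the new get_dev_fw_str hook, which the core uses to build the common attribute. A minimal sketch of what any provider needs after this series; the "foo" driver and its fields are made up:

static void foo_get_dev_fw_str(struct ib_device *ibdev, char *str,
			       size_t str_len)
{
	struct foo_dev *dev = container_of(ibdev, struct foo_dev, ibdev);

	snprintf(str, str_len, "%u.%u", dev->fw_major, dev->fw_minor);
}

	/* during device registration */
	dev->ibdev.get_dev_fw_str = foo_get_dev_fw_str;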
+6 -4
drivers/infiniband/hw/cxgb4/qp.c
··· 1081 1081 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, 1082 1082 qhp->ep->hwtid); 1083 1083 1084 - skb = alloc_skb(sizeof *wqe, gfp); 1085 - if (!skb) 1084 + skb = skb_dequeue(&qhp->ep->com.ep_skb_list); 1085 + if (WARN_ON(!skb)) 1086 1086 return; 1087 + 1087 1088 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); 1088 1089 1089 1090 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); ··· 1203 1202 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, 1204 1203 ep->hwtid); 1205 1204 1206 - skb = alloc_skb(sizeof *wqe, GFP_KERNEL); 1207 - if (!skb) 1205 + skb = skb_dequeue(&ep->com.ep_skb_list); 1206 + if (WARN_ON(!skb)) 1208 1207 return -ENOMEM; 1208 + 1209 1209 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1210 1210 1211 1211 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
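In the qp.c hunks, post_terminate() and rdma_fini() stop allocating their work-request skb at the point of use and instead dequeue one from ep->com.ep_skb_list, which is filled when the endpoint is set up; the WARN_ON only fires if that reserve was exhausted. A sketch of how such a reserve list can be built with the standard skb queue helpers; alloc_ep_skb_list() is illustrative, not the patch's helper:

static int alloc_ep_skb_list(struct sk_buff_head *list, int count, int size)
{
	struct sk_buff *skb;
	int i;

	skb_queue_head_init(list);
	for (i = 0; i < count; i++) {
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			skb_queue_purge(list);	/* drop what was queued so far */
			return -ENOMEM;
		}
		skb_queue_tail(list, skb);
	}
	return 0;
}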
+2
drivers/infiniband/hw/hfi1/hfi.h
··· 1174 1174 1175 1175 /* 8051 firmware version helper */ 1176 1176 #define dc8051_ver(a, b) ((a) << 8 | (b)) 1177 + #define dc8051_ver_maj(a) ((a & 0xff00) >> 8) 1178 + #define dc8051_ver_min(a) (a & 0x00ff) 1177 1179 1178 1180 /* f_put_tid types */ 1179 1181 #define PT_EXPECTED 0
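The two new helpers simply unpack what dc8051_ver() packs. Worked example with illustrative values:

	dc8051_ver(1, 0x2b)    == 0x012b
	dc8051_ver_maj(0x012b) == 1
	dc8051_ver_min(0x012b) == 0x2b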
+15
drivers/infiniband/hw/hfi1/verbs.c
··· 1291 1291 static void hfi1_fill_device_attr(struct hfi1_devdata *dd) 1292 1292 { 1293 1293 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 1294 + u16 ver = dd->dc8051_ver; 1294 1295 1295 1296 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); 1296 1297 1298 + rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 16) | 1299 + (u64)dc8051_ver_min(ver); 1297 1300 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 1298 1301 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 1299 1302 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | ··· 1570 1567 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); 1571 1568 } 1572 1569 1570 + static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str, 1571 + size_t str_len) 1572 + { 1573 + struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 1574 + struct hfi1_ibdev *dev = dev_from_rdi(rdi); 1575 + u16 ver = dd_from_dev(dev)->dc8051_ver; 1576 + 1577 + snprintf(str, str_len, "%u.%u", dc8051_ver_maj(ver), 1578 + dc8051_ver_min(ver)); 1579 + } 1580 + 1573 1581 /** 1574 1582 * hfi1_register_ib_device - register our device with the infiniband core 1575 1583 * @dd: the device data structure ··· 1627 1613 1628 1614 /* keep process mad in the driver */ 1629 1615 ibdev->process_mad = hfi1_process_mad; 1616 + ibdev->get_dev_fw_str = hfi1_get_dev_fw_str; 1630 1617 1631 1618 strncpy(ibdev->node_desc, init_utsname()->nodename, 1632 1619 sizeof(ibdev->node_desc));
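With the verbs.c change, the same dc8051 version is now reported two ways: packed into rdi->dparms.props.fw_ver for ib_query_device() (major in bits 31:16, minor in bits 15:0), and formatted as a string by hfi1_get_dev_fw_str() for the core fw_ver attribute. Continuing the example values from hfi.h:

	ver = dc8051_ver(1, 0x2b);		/* 0x012b */
	props.fw_ver = (1ULL << 16) | 0x2b;	/* 0x1002b */
	/* hfi1_get_dev_fw_str() formats the same value as "1.43" */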
+10 -14
drivers/infiniband/hw/i40iw/i40iw_verbs.c
··· 1973 1973 } 1974 1974 1975 1975 /** 1976 - * i40iw_show_fw_ver 1977 - */ 1978 - static ssize_t i40iw_show_fw_ver(struct device *dev, 1979 - struct device_attribute *attr, char *buf) 1980 - { 1981 - u32 firmware_version = I40IW_FW_VERSION; 1982 - 1983 - return sprintf(buf, "%u.%u\n", firmware_version, 1984 - (firmware_version & 0x000000ff)); 1985 - } 1986 - 1987 - /** 1988 1976 * i40iw_show_hca 1989 1977 */ 1990 1978 static ssize_t i40iw_show_hca(struct device *dev, ··· 1992 2004 } 1993 2005 1994 2006 static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL); 1995 - static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL); 1996 2007 static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL); 1997 2008 static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL); 1998 2009 1999 2010 static struct device_attribute *i40iw_dev_attributes[] = { 2000 2011 &dev_attr_hw_rev, 2001 - &dev_attr_fw_ver, 2002 2012 &dev_attr_hca_type, 2003 2013 &dev_attr_board_id 2004 2014 }; ··· 2413 2427 "iwRdmaInv" 2414 2428 }; 2415 2429 2430 + static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str, 2431 + size_t str_len) 2432 + { 2433 + u32 firmware_version = I40IW_FW_VERSION; 2434 + 2435 + snprintf(str, str_len, "%u.%u", firmware_version, 2436 + (firmware_version & 0x000000ff)); 2437 + } 2438 + 2416 2439 /** 2417 2440 * i40iw_alloc_hw_stats - Allocate a hw stats structure 2418 2441 * @ibdev: device pointer from stack ··· 2645 2650 memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name, 2646 2651 sizeof(iwibdev->ibdev.iwcm->ifname)); 2647 2652 iwibdev->ibdev.get_port_immutable = i40iw_port_immutable; 2653 + iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str; 2648 2654 iwibdev->ibdev.poll_cq = i40iw_poll_cq; 2649 2655 iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq; 2650 2656 iwibdev->ibdev.post_send = i40iw_post_send;
+12 -12
drivers/infiniband/hw/mlx4/main.c
··· 2022 2022 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device); 2023 2023 } 2024 2024 2025 - static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 2026 - char *buf) 2027 - { 2028 - struct mlx4_ib_dev *dev = 2029 - container_of(device, struct mlx4_ib_dev, ib_dev.dev); 2030 - return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32), 2031 - (int) (dev->dev->caps.fw_ver >> 16) & 0xffff, 2032 - (int) dev->dev->caps.fw_ver & 0xffff); 2033 - } 2034 - 2035 2025 static ssize_t show_rev(struct device *device, struct device_attribute *attr, 2036 2026 char *buf) 2037 2027 { ··· 2040 2050 } 2041 2051 2042 2052 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2043 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 2044 2053 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2045 2054 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2046 2055 2047 2056 static struct device_attribute *mlx4_class_attributes[] = { 2048 2057 &dev_attr_hw_rev, 2049 - &dev_attr_fw_ver, 2050 2058 &dev_attr_hca_type, 2051 2059 &dev_attr_board_id 2052 2060 }; ··· 2265 2277 return 0; 2266 2278 } 2267 2279 2280 + static void get_fw_ver_str(struct ib_device *device, char *str, 2281 + size_t str_len) 2282 + { 2283 + struct mlx4_ib_dev *dev = 2284 + container_of(device, struct mlx4_ib_dev, ib_dev); 2285 + snprintf(str, str_len, "%d.%d.%d", 2286 + (int) (dev->dev->caps.fw_ver >> 32), 2287 + (int) (dev->dev->caps.fw_ver >> 16) & 0xffff, 2288 + (int) dev->dev->caps.fw_ver & 0xffff); 2289 + } 2290 + 2268 2291 static void *mlx4_ib_add(struct mlx4_dev *dev) 2269 2292 { 2270 2293 struct mlx4_ib_dev *ibdev; ··· 2409 2410 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; 2410 2411 ibdev->ib_dev.process_mad = mlx4_ib_process_mad; 2411 2412 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; 2413 + ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str; 2412 2414 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext; 2413 2415 2414 2416 if (!mlx4_is_slave(ibdev->dev)) {
+86 -1
drivers/infiniband/hw/mlx5/cq.c
··· 424 424 item->key = be32_to_cpu(cqe->mkey); 425 425 } 426 426 427 + static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries, 428 + struct ib_wc *wc, int *npolled) 429 + { 430 + struct mlx5_ib_wq *wq; 431 + unsigned int cur; 432 + unsigned int idx; 433 + int np; 434 + int i; 435 + 436 + wq = &qp->sq; 437 + cur = wq->head - wq->tail; 438 + np = *npolled; 439 + 440 + if (cur == 0) 441 + return; 442 + 443 + for (i = 0; i < cur && np < num_entries; i++) { 444 + idx = wq->last_poll & (wq->wqe_cnt - 1); 445 + wc->wr_id = wq->wrid[idx]; 446 + wc->status = IB_WC_WR_FLUSH_ERR; 447 + wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; 448 + wq->tail++; 449 + np++; 450 + wc->qp = &qp->ibqp; 451 + wc++; 452 + wq->last_poll = wq->w_list[idx].next; 453 + } 454 + *npolled = np; 455 + } 456 + 457 + static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries, 458 + struct ib_wc *wc, int *npolled) 459 + { 460 + struct mlx5_ib_wq *wq; 461 + unsigned int cur; 462 + int np; 463 + int i; 464 + 465 + wq = &qp->rq; 466 + cur = wq->head - wq->tail; 467 + np = *npolled; 468 + 469 + if (cur == 0) 470 + return; 471 + 472 + for (i = 0; i < cur && np < num_entries; i++) { 473 + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 474 + wc->status = IB_WC_WR_FLUSH_ERR; 475 + wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; 476 + wq->tail++; 477 + np++; 478 + wc->qp = &qp->ibqp; 479 + wc++; 480 + } 481 + *npolled = np; 482 + } 483 + 484 + static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, 485 + struct ib_wc *wc, int *npolled) 486 + { 487 + struct mlx5_ib_qp *qp; 488 + 489 + *npolled = 0; 490 + /* Find uncompleted WQEs belonging to that cq and retrun mmics ones */ 491 + list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { 492 + sw_send_comp(qp, num_entries, wc + *npolled, npolled); 493 + if (*npolled >= num_entries) 494 + return; 495 + } 496 + 497 + list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { 498 + sw_recv_comp(qp, num_entries, wc + *npolled, npolled); 499 + if (*npolled >= num_entries) 500 + return; 501 + } 502 + } 503 + 427 504 static int mlx5_poll_one(struct mlx5_ib_cq *cq, 428 505 struct mlx5_ib_qp **cur_qp, 429 506 struct ib_wc *wc) ··· 671 594 { 672 595 struct mlx5_ib_cq *cq = to_mcq(ibcq); 673 596 struct mlx5_ib_qp *cur_qp = NULL; 597 + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); 598 + struct mlx5_core_dev *mdev = dev->mdev; 674 599 unsigned long flags; 675 600 int soft_polled = 0; 676 601 int npolled; 677 602 int err = 0; 678 603 679 604 spin_lock_irqsave(&cq->lock, flags); 605 + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 606 + mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled); 607 + goto out; 608 + } 680 609 681 610 if (unlikely(!list_empty(&cq->wc_list))) 682 611 soft_polled = poll_soft_wc(cq, num_entries, wc); ··· 695 612 696 613 if (npolled) 697 614 mlx5_cq_set_ci(&cq->mcq); 698 - 615 + out: 699 616 spin_unlock_irqrestore(&cq->lock, flags); 700 617 701 618 if (err == 0 || err == -EAGAIN) ··· 926 843 cq->resize_buf = NULL; 927 844 cq->resize_umem = NULL; 928 845 cq->create_flags = attr->flags; 846 + INIT_LIST_HEAD(&cq->list_send_qp); 847 + INIT_LIST_HEAD(&cq->list_recv_qp); 929 848 930 849 if (context) { 931 850 err = create_cq_user(dev, udata, context, cq, entries,
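The cq.c additions synthesize completions in software once the HCA has entered the internal error state: mlx5_ib_poll_sw_comp() walks the QPs attached to the CQ (via the new list_send_qp/list_recv_qp lists) and reports every outstanding WQE as IB_WC_WR_FLUSH_ERR, so consumers can still drain and clean up even though the hardware will never answer. A consumer-side sketch of why that matters; the outstanding counter and cq variable are assumed context, not part of the patch:

	struct ib_wc wc;

	while (outstanding) {
		if (ib_poll_cq(cq, 1, &wc) != 1)
			continue;		/* nothing yet, keep draining */
		if (wc.status != IB_WC_SUCCESS && wc.status != IB_WC_WR_FLUSH_ERR)
			pr_warn("unexpected completion status %d\n", wc.status);
		outstanding--;			/* flushed entries still count */
	}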
+406 -23
drivers/infiniband/hw/mlx5/main.c
··· 42 42 #include <asm/pat.h> 43 43 #endif 44 44 #include <linux/sched.h> 45 + #include <linux/delay.h> 45 46 #include <rdma/ib_user_verbs.h> 46 47 #include <rdma/ib_addr.h> 47 48 #include <rdma/ib_cache.h> 48 49 #include <linux/mlx5/port.h> 49 50 #include <linux/mlx5/vport.h> 51 + #include <linux/list.h> 50 52 #include <rdma/ib_smi.h> 51 53 #include <rdma/ib_umem.h> 52 54 #include <linux/in.h> ··· 459 457 int max_rq_sg; 460 458 int max_sq_sg; 461 459 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); 460 + struct mlx5_ib_query_device_resp resp = {}; 461 + size_t resp_len; 462 + u64 max_tso; 462 463 463 - if (uhw->inlen || uhw->outlen) 464 + resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); 465 + if (uhw->outlen && uhw->outlen < resp_len) 466 + return -EINVAL; 467 + else 468 + resp.response_length = resp_len; 469 + 470 + if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) 464 471 return -EINVAL; 465 472 466 473 memset(props, 0, sizeof(*props)); ··· 522 511 if (MLX5_CAP_GEN(mdev, block_lb_mc)) 523 512 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 524 513 525 - if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 526 - (MLX5_CAP_ETH(dev->mdev, csum_cap))) 514 + if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) { 515 + if (MLX5_CAP_ETH(mdev, csum_cap)) 527 516 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; 517 + 518 + if (field_avail(typeof(resp), tso_caps, uhw->outlen)) { 519 + max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); 520 + if (max_tso) { 521 + resp.tso_caps.max_tso = 1 << max_tso; 522 + resp.tso_caps.supported_qpts |= 523 + 1 << IB_QPT_RAW_PACKET; 524 + resp.response_length += sizeof(resp.tso_caps); 525 + } 526 + } 527 + } 528 528 529 529 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { 530 530 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; ··· 597 575 598 576 if (!mlx5_core_is_pf(mdev)) 599 577 props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; 578 + 579 + if (uhw->outlen) { 580 + err = ib_copy_to_udata(uhw, &resp, resp.response_length); 581 + 582 + if (err) 583 + return err; 584 + } 600 585 601 586 return 0; 602 587 } ··· 1012 983 goto out_uars; 1013 984 } 1014 985 986 + INIT_LIST_HEAD(&context->vma_private_list); 1015 987 INIT_LIST_HEAD(&context->db_page_list); 1016 988 mutex_init(&context->db_page_mutex); 1017 989 ··· 1021 991 1022 992 if (field_avail(typeof(resp), cqe_version, udata->outlen)) 1023 993 resp.response_length += sizeof(resp.cqe_version); 994 + 995 + if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { 996 + resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE; 997 + resp.response_length += sizeof(resp.cmds_supp_uhw); 998 + } 1024 999 1025 1000 /* 1026 1001 * We don't want to expose information from the PCI bar that is located ··· 1041 1006 offsetof(struct mlx5_init_seg, internal_timer_h) % 1042 1007 PAGE_SIZE; 1043 1008 resp.response_length += sizeof(resp.hca_core_clock_offset) + 1044 - sizeof(resp.reserved2) + 1045 - sizeof(resp.reserved3); 1009 + sizeof(resp.reserved2); 1046 1010 } 1047 1011 1048 1012 err = ib_copy_to_udata(udata, &resp, resp.response_length); ··· 1120 1086 return get_arg(offset); 1121 1087 } 1122 1088 1089 + static void mlx5_ib_vma_open(struct vm_area_struct *area) 1090 + { 1091 + /* vma_open is called when a new VMA is created on top of our VMA. This 1092 + * is done through either mremap flow or split_vma (usually due to 1093 + * mlock, madvise, munmap, etc.) We do not support a clone of the VMA, 1094 + * as this VMA is strongly hardware related. 
Therefore we set the 1095 + * vm_ops of the newly created/cloned VMA to NULL, to prevent it from 1096 + * calling us again and trying to do incorrect actions. We assume that 1097 + * the original VMA size is exactly a single page, and therefore all 1098 + * "splitting" operation will not happen to it. 1099 + */ 1100 + area->vm_ops = NULL; 1101 + } 1102 + 1103 + static void mlx5_ib_vma_close(struct vm_area_struct *area) 1104 + { 1105 + struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data; 1106 + 1107 + /* It's guaranteed that all VMAs opened on a FD are closed before the 1108 + * file itself is closed, therefore no sync is needed with the regular 1109 + * closing flow. (e.g. mlx5 ib_dealloc_ucontext) 1110 + * However need a sync with accessing the vma as part of 1111 + * mlx5_ib_disassociate_ucontext. 1112 + * The close operation is usually called under mm->mmap_sem except when 1113 + * process is exiting. 1114 + * The exiting case is handled explicitly as part of 1115 + * mlx5_ib_disassociate_ucontext. 1116 + */ 1117 + mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data; 1118 + 1119 + /* setting the vma context pointer to null in the mlx5_ib driver's 1120 + * private data, to protect a race condition in 1121 + * mlx5_ib_disassociate_ucontext(). 1122 + */ 1123 + mlx5_ib_vma_priv_data->vma = NULL; 1124 + list_del(&mlx5_ib_vma_priv_data->list); 1125 + kfree(mlx5_ib_vma_priv_data); 1126 + } 1127 + 1128 + static const struct vm_operations_struct mlx5_ib_vm_ops = { 1129 + .open = mlx5_ib_vma_open, 1130 + .close = mlx5_ib_vma_close 1131 + }; 1132 + 1133 + static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, 1134 + struct mlx5_ib_ucontext *ctx) 1135 + { 1136 + struct mlx5_ib_vma_private_data *vma_prv; 1137 + struct list_head *vma_head = &ctx->vma_private_list; 1138 + 1139 + vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL); 1140 + if (!vma_prv) 1141 + return -ENOMEM; 1142 + 1143 + vma_prv->vma = vma; 1144 + vma->vm_private_data = vma_prv; 1145 + vma->vm_ops = &mlx5_ib_vm_ops; 1146 + 1147 + list_add(&vma_prv->list, vma_head); 1148 + 1149 + return 0; 1150 + } 1151 + 1152 + static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) 1153 + { 1154 + int ret; 1155 + struct vm_area_struct *vma; 1156 + struct mlx5_ib_vma_private_data *vma_private, *n; 1157 + struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1158 + struct task_struct *owning_process = NULL; 1159 + struct mm_struct *owning_mm = NULL; 1160 + 1161 + owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); 1162 + if (!owning_process) 1163 + return; 1164 + 1165 + owning_mm = get_task_mm(owning_process); 1166 + if (!owning_mm) { 1167 + pr_info("no mm, disassociate ucontext is pending task termination\n"); 1168 + while (1) { 1169 + put_task_struct(owning_process); 1170 + usleep_range(1000, 2000); 1171 + owning_process = get_pid_task(ibcontext->tgid, 1172 + PIDTYPE_PID); 1173 + if (!owning_process || 1174 + owning_process->state == TASK_DEAD) { 1175 + pr_info("disassociate ucontext done, task was terminated\n"); 1176 + /* in case task was dead need to release the 1177 + * task struct. 1178 + */ 1179 + if (owning_process) 1180 + put_task_struct(owning_process); 1181 + return; 1182 + } 1183 + } 1184 + } 1185 + 1186 + /* need to protect from a race on closing the vma as part of 1187 + * mlx5_ib_vma_close. 
1188 + */ 1189 + down_read(&owning_mm->mmap_sem); 1190 + list_for_each_entry_safe(vma_private, n, &context->vma_private_list, 1191 + list) { 1192 + vma = vma_private->vma; 1193 + ret = zap_vma_ptes(vma, vma->vm_start, 1194 + PAGE_SIZE); 1195 + WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__); 1196 + /* context going to be destroyed, should 1197 + * not access ops any more. 1198 + */ 1199 + vma->vm_ops = NULL; 1200 + list_del(&vma_private->list); 1201 + kfree(vma_private); 1202 + } 1203 + up_read(&owning_mm->mmap_sem); 1204 + mmput(owning_mm); 1205 + put_task_struct(owning_process); 1206 + } 1207 + 1123 1208 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) 1124 1209 { 1125 1210 switch (cmd) { ··· 1254 1101 } 1255 1102 1256 1103 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, 1257 - struct vm_area_struct *vma, struct mlx5_uuar_info *uuari) 1104 + struct vm_area_struct *vma, 1105 + struct mlx5_ib_ucontext *context) 1258 1106 { 1107 + struct mlx5_uuar_info *uuari = &context->uuari; 1259 1108 int err; 1260 1109 unsigned long idx; 1261 1110 phys_addr_t pfn, pa; ··· 1307 1152 mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd), 1308 1153 vma->vm_start, &pa); 1309 1154 1310 - return 0; 1155 + return mlx5_ib_set_vma_data(vma, context); 1311 1156 } 1312 1157 1313 1158 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 1314 1159 { 1315 1160 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1316 1161 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1317 - struct mlx5_uuar_info *uuari = &context->uuari; 1318 1162 unsigned long command; 1319 1163 phys_addr_t pfn; 1320 1164 ··· 1322 1168 case MLX5_IB_MMAP_WC_PAGE: 1323 1169 case MLX5_IB_MMAP_NC_PAGE: 1324 1170 case MLX5_IB_MMAP_REGULAR_PAGE: 1325 - return uar_mmap(dev, command, vma, uuari); 1171 + return uar_mmap(dev, command, vma, context); 1326 1172 1327 1173 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 1328 1174 return -ENOSYS; ··· 1484 1330 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 1485 1331 &ib_spec->ipv4.val.dst_ip, 1486 1332 sizeof(ib_spec->ipv4.val.dst_ip)); 1333 + break; 1334 + case IB_FLOW_SPEC_IPV6: 1335 + if (ib_spec->size != sizeof(ib_spec->ipv6)) 1336 + return -EINVAL; 1337 + 1338 + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1339 + ethertype, 0xffff); 1340 + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1341 + ethertype, ETH_P_IPV6); 1342 + 1343 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1344 + src_ipv4_src_ipv6.ipv6_layout.ipv6), 1345 + &ib_spec->ipv6.mask.src_ip, 1346 + sizeof(ib_spec->ipv6.mask.src_ip)); 1347 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1348 + src_ipv4_src_ipv6.ipv6_layout.ipv6), 1349 + &ib_spec->ipv6.val.src_ip, 1350 + sizeof(ib_spec->ipv6.val.src_ip)); 1351 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1352 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1353 + &ib_spec->ipv6.mask.dst_ip, 1354 + sizeof(ib_spec->ipv6.mask.dst_ip)); 1355 + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1356 + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1357 + &ib_spec->ipv6.val.dst_ip, 1358 + sizeof(ib_spec->ipv6.val.dst_ip)); 1487 1359 break; 1488 1360 case IB_FLOW_SPEC_TCP: 1489 1361 if (ib_spec->size != sizeof(ib_spec->tcp_udp)) ··· 1984 1804 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 1985 1805 } 1986 1806 1987 - static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 1988 - char *buf) 1989 - { 1990 - struct mlx5_ib_dev *dev = 1991 - 
container_of(device, struct mlx5_ib_dev, ib_dev.dev); 1992 - return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), 1993 - fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); 1994 - } 1995 - 1996 1807 static ssize_t show_rev(struct device *device, struct device_attribute *attr, 1997 1808 char *buf) 1998 1809 { ··· 2002 1831 } 2003 1832 2004 1833 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2005 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 2006 1834 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2007 1835 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2008 1836 static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); ··· 2009 1839 2010 1840 static struct device_attribute *mlx5_class_attributes[] = { 2011 1841 &dev_attr_hw_rev, 2012 - &dev_attr_fw_ver, 2013 1842 &dev_attr_hca_type, 2014 1843 &dev_attr_board_id, 2015 1844 &dev_attr_fw_pages, ··· 2026 1857 mutex_unlock(&ports->devr->mutex); 2027 1858 } 2028 1859 1860 + static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 1861 + { 1862 + struct mlx5_ib_qp *mqp; 1863 + struct mlx5_ib_cq *send_mcq, *recv_mcq; 1864 + struct mlx5_core_cq *mcq; 1865 + struct list_head cq_armed_list; 1866 + unsigned long flags_qp; 1867 + unsigned long flags_cq; 1868 + unsigned long flags; 1869 + 1870 + INIT_LIST_HEAD(&cq_armed_list); 1871 + 1872 + /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 1873 + spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 1874 + list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 1875 + spin_lock_irqsave(&mqp->sq.lock, flags_qp); 1876 + if (mqp->sq.tail != mqp->sq.head) { 1877 + send_mcq = to_mcq(mqp->ibqp.send_cq); 1878 + spin_lock_irqsave(&send_mcq->lock, flags_cq); 1879 + if (send_mcq->mcq.comp && 1880 + mqp->ibqp.send_cq->comp_handler) { 1881 + if (!send_mcq->mcq.reset_notify_added) { 1882 + send_mcq->mcq.reset_notify_added = 1; 1883 + list_add_tail(&send_mcq->mcq.reset_notify, 1884 + &cq_armed_list); 1885 + } 1886 + } 1887 + spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 1888 + } 1889 + spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 1890 + spin_lock_irqsave(&mqp->rq.lock, flags_qp); 1891 + /* no handling is needed for SRQ */ 1892 + if (!mqp->ibqp.srq) { 1893 + if (mqp->rq.tail != mqp->rq.head) { 1894 + recv_mcq = to_mcq(mqp->ibqp.recv_cq); 1895 + spin_lock_irqsave(&recv_mcq->lock, flags_cq); 1896 + if (recv_mcq->mcq.comp && 1897 + mqp->ibqp.recv_cq->comp_handler) { 1898 + if (!recv_mcq->mcq.reset_notify_added) { 1899 + recv_mcq->mcq.reset_notify_added = 1; 1900 + list_add_tail(&recv_mcq->mcq.reset_notify, 1901 + &cq_armed_list); 1902 + } 1903 + } 1904 + spin_unlock_irqrestore(&recv_mcq->lock, 1905 + flags_cq); 1906 + } 1907 + } 1908 + spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 1909 + } 1910 + /*At that point all inflight post send were put to be executed as of we 1911 + * lock/unlock above locks Now need to arm all involved CQs. 
1912 + */ 1913 + list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 1914 + mcq->comp(mcq); 1915 + } 1916 + spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 1917 + } 1918 + 2029 1919 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 2030 1920 enum mlx5_dev_event event, unsigned long param) 2031 1921 { ··· 2097 1869 case MLX5_DEV_EVENT_SYS_ERROR: 2098 1870 ibdev->ib_active = false; 2099 1871 ibev.event = IB_EVENT_DEVICE_FATAL; 1872 + mlx5_ib_handle_internal_error(ibdev); 2100 1873 break; 2101 1874 2102 1875 case MLX5_DEV_EVENT_PORT_UP: ··· 2504 2275 return 0; 2505 2276 } 2506 2277 2278 + static void get_dev_fw_str(struct ib_device *ibdev, char *str, 2279 + size_t str_len) 2280 + { 2281 + struct mlx5_ib_dev *dev = 2282 + container_of(ibdev, struct mlx5_ib_dev, ib_dev); 2283 + snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev), 2284 + fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); 2285 + } 2286 + 2507 2287 static int mlx5_enable_roce(struct mlx5_ib_dev *dev) 2508 2288 { 2509 2289 int err; ··· 2539 2301 unregister_netdevice_notifier(&dev->roce.nb); 2540 2302 } 2541 2303 2304 + static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) 2305 + { 2306 + unsigned int i; 2307 + 2308 + for (i = 0; i < dev->num_ports; i++) 2309 + mlx5_core_dealloc_q_counter(dev->mdev, 2310 + dev->port[i].q_cnt_id); 2311 + } 2312 + 2313 + static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev) 2314 + { 2315 + int i; 2316 + int ret; 2317 + 2318 + for (i = 0; i < dev->num_ports; i++) { 2319 + ret = mlx5_core_alloc_q_counter(dev->mdev, 2320 + &dev->port[i].q_cnt_id); 2321 + if (ret) { 2322 + mlx5_ib_warn(dev, 2323 + "couldn't allocate queue counter for port %d, err %d\n", 2324 + i + 1, ret); 2325 + goto dealloc_counters; 2326 + } 2327 + } 2328 + 2329 + return 0; 2330 + 2331 + dealloc_counters: 2332 + while (--i >= 0) 2333 + mlx5_core_dealloc_q_counter(dev->mdev, 2334 + dev->port[i].q_cnt_id); 2335 + 2336 + return ret; 2337 + } 2338 + 2339 + static const char const *names[] = { 2340 + "rx_write_requests", 2341 + "rx_read_requests", 2342 + "rx_atomic_requests", 2343 + "out_of_buffer", 2344 + "out_of_sequence", 2345 + "duplicate_request", 2346 + "rnr_nak_retry_err", 2347 + "packet_seq_err", 2348 + "implied_nak_seq_err", 2349 + "local_ack_timeout_err", 2350 + }; 2351 + 2352 + static const size_t stats_offsets[] = { 2353 + MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests), 2354 + MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests), 2355 + MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests), 2356 + MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer), 2357 + MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence), 2358 + MLX5_BYTE_OFF(query_q_counter_out, duplicate_request), 2359 + MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err), 2360 + MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err), 2361 + MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err), 2362 + MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err), 2363 + }; 2364 + 2365 + static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 2366 + u8 port_num) 2367 + { 2368 + BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets)); 2369 + 2370 + /* We support only per port stats */ 2371 + if (port_num == 0) 2372 + return NULL; 2373 + 2374 + return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names), 2375 + RDMA_HW_STATS_DEFAULT_LIFESPAN); 2376 + } 2377 + 2378 + static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 2379 + struct rdma_hw_stats *stats, 2380 + u8 port, int index) 2381 + 
{ 2382 + struct mlx5_ib_dev *dev = to_mdev(ibdev); 2383 + int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 2384 + void *out; 2385 + __be32 val; 2386 + int ret; 2387 + int i; 2388 + 2389 + if (!port || !stats) 2390 + return -ENOSYS; 2391 + 2392 + out = mlx5_vzalloc(outlen); 2393 + if (!out) 2394 + return -ENOMEM; 2395 + 2396 + ret = mlx5_core_query_q_counter(dev->mdev, 2397 + dev->port[port - 1].q_cnt_id, 0, 2398 + out, outlen); 2399 + if (ret) 2400 + goto free; 2401 + 2402 + for (i = 0; i < ARRAY_SIZE(names); i++) { 2403 + val = *(__be32 *)(out + stats_offsets[i]); 2404 + stats->value[i] = (u64)be32_to_cpu(val); 2405 + } 2406 + free: 2407 + kvfree(out); 2408 + return ARRAY_SIZE(names); 2409 + } 2410 + 2542 2411 static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 2543 2412 { 2544 2413 struct mlx5_ib_dev *dev; ··· 2668 2323 2669 2324 dev->mdev = mdev; 2670 2325 2326 + dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port), 2327 + GFP_KERNEL); 2328 + if (!dev->port) 2329 + goto err_dealloc; 2330 + 2671 2331 rwlock_init(&dev->roce.netdev_lock); 2672 2332 err = get_port_caps(dev); 2673 2333 if (err) 2674 - goto err_dealloc; 2334 + goto err_free_port; 2675 2335 2676 2336 if (mlx5_use_mad_ifc(dev)) 2677 2337 get_ext_port_caps(dev); ··· 2771 2421 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 2772 2422 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 2773 2423 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 2424 + dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 2774 2425 if (mlx5_core_is_pf(mdev)) { 2775 2426 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 2776 2427 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 2777 2428 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; 2778 2429 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; 2779 2430 } 2431 + 2432 + dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext; 2780 2433 2781 2434 mlx5_ib_internal_fill_odp_caps(dev); 2782 2435 ··· 2789 2436 dev->ib_dev.uverbs_cmd_mask |= 2790 2437 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 2791 2438 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 2439 + } 2440 + 2441 + if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) && 2442 + MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 2443 + dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; 2444 + dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats; 2792 2445 } 2793 2446 2794 2447 if (MLX5_CAP_GEN(mdev, xrc)) { ··· 2809 2450 IB_LINK_LAYER_ETHERNET) { 2810 2451 dev->ib_dev.create_flow = mlx5_ib_create_flow; 2811 2452 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 2453 + dev->ib_dev.create_wq = mlx5_ib_create_wq; 2454 + dev->ib_dev.modify_wq = mlx5_ib_modify_wq; 2455 + dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; 2456 + dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; 2457 + dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; 2812 2458 dev->ib_dev.uverbs_ex_cmd_mask |= 2813 2459 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 2814 - (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); 2460 + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) | 2461 + (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 2462 + (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 2463 + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 2464 + (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 2465 + (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 2815 2466 } 2816 2467 err = init_node_data(dev); 2817 2468 if (err) ··· 2829 2460 2830 2461 mutex_init(&dev->flow_db.lock); 2831 2462 mutex_init(&dev->cap_mask_mutex); 2463 + INIT_LIST_HEAD(&dev->qp_list); 2464 + 
spin_lock_init(&dev->reset_flow_resource_lock); 2832 2465 2833 2466 if (ll == IB_LINK_LAYER_ETHERNET) { 2834 2467 err = mlx5_enable_roce(dev); ··· 2846 2475 if (err) 2847 2476 goto err_rsrc; 2848 2477 2849 - err = ib_register_device(&dev->ib_dev, NULL); 2478 + err = mlx5_ib_alloc_q_counters(dev); 2850 2479 if (err) 2851 2480 goto err_odp; 2481 + 2482 + err = ib_register_device(&dev->ib_dev, NULL); 2483 + if (err) 2484 + goto err_q_cnt; 2852 2485 2853 2486 err = create_umr_res(dev); 2854 2487 if (err) ··· 2875 2500 err_dev: 2876 2501 ib_unregister_device(&dev->ib_dev); 2877 2502 2503 + err_q_cnt: 2504 + mlx5_ib_dealloc_q_counters(dev); 2505 + 2878 2506 err_odp: 2879 2507 mlx5_ib_odp_remove_one(dev); 2880 2508 ··· 2887 2509 err_disable_roce: 2888 2510 if (ll == IB_LINK_LAYER_ETHERNET) 2889 2511 mlx5_disable_roce(dev); 2512 + 2513 + err_free_port: 2514 + kfree(dev->port); 2890 2515 2891 2516 err_dealloc: 2892 2517 ib_dealloc_device((struct ib_device *)dev); ··· 2903 2522 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); 2904 2523 2905 2524 ib_unregister_device(&dev->ib_dev); 2525 + mlx5_ib_dealloc_q_counters(dev); 2906 2526 destroy_umrc_res(dev); 2907 2527 mlx5_ib_odp_remove_one(dev); 2908 2528 destroy_dev_resources(&dev->devr); 2909 2529 if (ll == IB_LINK_LAYER_ETHERNET) 2910 2530 mlx5_disable_roce(dev); 2531 + kfree(dev->port); 2911 2532 ib_dealloc_device(&dev->ib_dev); 2912 2533 } 2913 2534
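Among the main.c additions, the Q-counter work plugs mlx5 into the generic hw_stats interface: one counter set is allocated per port at probe time, alloc_hw_stats() describes the ten counters, and get_hw_stats() fills them from QUERY_Q_COUNTER on demand. A simplified sketch of that contract from the caller's point of view (error handling omitted):

	struct rdma_hw_stats *stats;

	stats = ibdev->alloc_hw_stats(ibdev, port);	/* names + storage */
	if (stats && ibdev->get_hw_stats(ibdev, stats, port, 0) > 0)
		pr_info("%s = %llu\n", stats->names[0],
			(unsigned long long)stats->value[0]);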
+74
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 105 105 MLX5_CQE_VERSION_V1, 106 106 }; 107 107 108 + struct mlx5_ib_vma_private_data { 109 + struct list_head list; 110 + struct vm_area_struct *vma; 111 + }; 112 + 108 113 struct mlx5_ib_ucontext { 109 114 struct ib_ucontext ibucontext; 110 115 struct list_head db_page_list; ··· 121 116 u8 cqe_version; 122 117 /* Transport Domain number */ 123 118 u32 tdn; 119 + struct list_head vma_private_list; 124 120 }; 125 121 126 122 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) ··· 223 217 void *qend; 224 218 }; 225 219 220 + struct mlx5_ib_rwq { 221 + struct ib_wq ibwq; 222 + u32 rqn; 223 + u32 rq_num_pas; 224 + u32 log_rq_stride; 225 + u32 log_rq_size; 226 + u32 rq_page_offset; 227 + u32 log_page_size; 228 + struct ib_umem *umem; 229 + size_t buf_size; 230 + unsigned int page_shift; 231 + int create_type; 232 + struct mlx5_db db; 233 + u32 user_index; 234 + u32 wqe_count; 235 + u32 wqe_shift; 236 + int wq_sig; 237 + }; 238 + 226 239 enum { 227 240 MLX5_QP_USER, 228 241 MLX5_QP_KERNEL, 229 242 MLX5_QP_EMPTY 243 + }; 244 + 245 + enum { 246 + MLX5_WQ_USER, 247 + MLX5_WQ_KERNEL 248 + }; 249 + 250 + struct mlx5_ib_rwq_ind_table { 251 + struct ib_rwq_ind_table ib_rwq_ind_tbl; 252 + u32 rqtn; 230 253 }; 231 254 232 255 /* ··· 301 266 u8 resp_depth; 302 267 }; 303 268 269 + struct mlx5_ib_rss_qp { 270 + u32 tirn; 271 + }; 272 + 304 273 struct mlx5_ib_rq { 305 274 struct mlx5_ib_qp_base base; 306 275 struct mlx5_ib_wq *rq; ··· 333 294 union { 334 295 struct mlx5_ib_qp_trans trans_qp; 335 296 struct mlx5_ib_raw_packet_qp raw_packet_qp; 297 + struct mlx5_ib_rss_qp rss_qp; 336 298 }; 337 299 struct mlx5_buf buf; 338 300 ··· 380 340 spinlock_t disable_page_faults_lock; 381 341 struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS]; 382 342 #endif 343 + struct list_head qps_list; 344 + struct list_head cq_recv_list; 345 + struct list_head cq_send_list; 383 346 }; 384 347 385 348 struct mlx5_ib_cq_buf { ··· 444 401 struct mlx5_ib_cq_buf *resize_buf; 445 402 struct ib_umem *resize_umem; 446 403 int cqe_size; 404 + struct list_head list_send_qp; 405 + struct list_head list_recv_qp; 447 406 u32 create_flags; 448 407 struct list_head wc_list; 449 408 enum ib_cq_notify_flags notify_flags; ··· 591 546 struct mutex mutex; 592 547 }; 593 548 549 + struct mlx5_ib_port { 550 + u16 q_cnt_id; 551 + }; 552 + 594 553 struct mlx5_roce { 595 554 /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL 596 555 * netdev pointer ··· 630 581 struct srcu_struct mr_srcu; 631 582 #endif 632 583 struct mlx5_ib_flow_db flow_db; 584 + /* protect resources needed as part of reset flow */ 585 + spinlock_t reset_flow_resource_lock; 586 + struct list_head qp_list; 587 + /* Array with num_ports elements */ 588 + struct mlx5_ib_port *port; 633 589 }; 634 590 635 591 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) ··· 680 626 static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp) 681 627 { 682 628 return container_of(ibqp, struct mlx5_ib_qp, ibqp); 629 + } 630 + 631 + static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq) 632 + { 633 + return container_of(ibwq, struct mlx5_ib_rwq, ibwq); 634 + } 635 + 636 + static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) 637 + { 638 + return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl); 683 639 } 684 640 685 641 static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) ··· 826 762 int mlx5_mr_ib_cont_pages(struct ib_umem 
*umem, u64 addr, int *count, int *shift); 827 763 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, 828 764 struct ib_mr_status *mr_status); 765 + struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, 766 + struct ib_wq_init_attr *init_attr, 767 + struct ib_udata *udata); 768 + int mlx5_ib_destroy_wq(struct ib_wq *wq); 769 + int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 770 + u32 wq_attr_mask, struct ib_udata *udata); 771 + struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, 772 + struct ib_rwq_ind_table_init_attr *init_attr, 773 + struct ib_udata *udata); 774 + int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 829 775 830 776 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 831 777 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
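mlx5_ib.h gains the objects behind the new RSS verbs: per-WQ state (mlx5_ib_rwq), the indirection table (mlx5_ib_rwq_ind_table), the TIR number kept in the QP for RSS RAW_PACKET QPs, and the prototypes at the end. A rough sketch of how these compose through the core WQ verbs introduced alongside this series, assuming the ib_create_wq()/ib_create_rwq_ind_table() helpers; sizes are arbitrary and error handling is omitted:

	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};
	struct ib_wq *wq = ib_create_wq(pd, &wq_attr);

	struct ib_rwq_ind_table_init_attr ind_attr = {
		.log_ind_tbl_size = 0,		/* a single WQ in this example */
		.ind_tbl	  = &wq,
	};
	struct ib_rwq_ind_table *ind_tbl =
		ib_create_rwq_ind_table(pd->device, &ind_attr);
	/* an RSS RAW_PACKET QP created with init_attr->rwq_ind_tbl = ind_tbl
	 * then spreads receives across the table's WQs
	 */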
+4
drivers/infiniband/hw/mlx5/mr.c
··· 1193 1193 1194 1194 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1195 1195 { 1196 + struct mlx5_core_dev *mdev = dev->mdev; 1196 1197 struct umr_common *umrc = &dev->umrc; 1197 1198 struct mlx5_ib_umr_context umr_context; 1198 1199 struct mlx5_umr_wr umrwr = {}; 1199 1200 struct ib_send_wr *bad; 1200 1201 int err; 1202 + 1203 + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 1204 + return 0; 1201 1205 1202 1206 mlx5_ib_init_umr_context(&umr_context); 1203 1207
+672 -19
drivers/infiniband/hw/mlx5/qp.c
··· 77 77 u8 rsvd0[16]; 78 78 }; 79 79 80 + static void get_cqs(enum ib_qp_type qp_type, 81 + struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 82 + struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq); 83 + 80 84 static int is_qp0(enum ib_qp_type qp_type) 81 85 { 82 86 return qp_type == IB_QPT_SMI; ··· 613 609 } 614 610 } 615 611 612 + static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, 613 + struct mlx5_ib_cq *recv_cq); 614 + static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, 615 + struct mlx5_ib_cq *recv_cq); 616 + 616 617 static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) 617 618 { 618 619 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; ··· 655 646 ib_umem_release(*umem); 656 647 *umem = NULL; 657 648 649 + return err; 650 + } 651 + 652 + static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq) 653 + { 654 + struct mlx5_ib_ucontext *context; 655 + 656 + context = to_mucontext(pd->uobject->context); 657 + mlx5_ib_db_unmap_user(context, &rwq->db); 658 + if (rwq->umem) 659 + ib_umem_release(rwq->umem); 660 + } 661 + 662 + static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 663 + struct mlx5_ib_rwq *rwq, 664 + struct mlx5_ib_create_wq *ucmd) 665 + { 666 + struct mlx5_ib_ucontext *context; 667 + int page_shift = 0; 668 + int npages; 669 + u32 offset = 0; 670 + int ncont = 0; 671 + int err; 672 + 673 + if (!ucmd->buf_addr) 674 + return -EINVAL; 675 + 676 + context = to_mucontext(pd->uobject->context); 677 + rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr, 678 + rwq->buf_size, 0, 0); 679 + if (IS_ERR(rwq->umem)) { 680 + mlx5_ib_dbg(dev, "umem_get failed\n"); 681 + err = PTR_ERR(rwq->umem); 682 + return err; 683 + } 684 + 685 + mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift, 686 + &ncont, NULL); 687 + err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, 688 + &rwq->rq_page_offset); 689 + if (err) { 690 + mlx5_ib_warn(dev, "bad offset\n"); 691 + goto err_umem; 692 + } 693 + 694 + rwq->rq_num_pas = ncont; 695 + rwq->page_shift = page_shift; 696 + rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 697 + rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE); 698 + 699 + mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n", 700 + (unsigned long long)ucmd->buf_addr, rwq->buf_size, 701 + npages, page_shift, ncont, offset); 702 + 703 + err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db); 704 + if (err) { 705 + mlx5_ib_dbg(dev, "map failed\n"); 706 + goto err_umem; 707 + } 708 + 709 + rwq->create_type = MLX5_WQ_USER; 710 + return 0; 711 + 712 + err_umem: 713 + ib_umem_release(rwq->umem); 658 714 return err; 659 715 } 660 716 ··· 1275 1201 rq->doorbell = &qp->db; 1276 1202 } 1277 1203 1204 + static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 1205 + { 1206 + mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn); 1207 + } 1208 + 1209 + static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1210 + struct ib_pd *pd, 1211 + struct ib_qp_init_attr *init_attr, 1212 + struct ib_udata *udata) 1213 + { 1214 + struct ib_uobject *uobj = pd->uobject; 1215 + struct ib_ucontext *ucontext = uobj->context; 1216 + struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext); 1217 + struct mlx5_ib_create_qp_resp resp = {}; 1218 + int inlen; 1219 + int err; 1220 + u32 *in; 1221 + void *tirc; 1222 + void *hfso; 1223 + u32 selected_fields = 0; 1224 + size_t min_resp_len; 1225 + u32 tdn = mucontext->tdn; 
1226 + struct mlx5_ib_create_qp_rss ucmd = {}; 1227 + size_t required_cmd_sz; 1228 + 1229 + if (init_attr->qp_type != IB_QPT_RAW_PACKET) 1230 + return -EOPNOTSUPP; 1231 + 1232 + if (init_attr->create_flags || init_attr->send_cq) 1233 + return -EINVAL; 1234 + 1235 + min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index); 1236 + if (udata->outlen < min_resp_len) 1237 + return -EINVAL; 1238 + 1239 + required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1); 1240 + if (udata->inlen < required_cmd_sz) { 1241 + mlx5_ib_dbg(dev, "invalid inlen\n"); 1242 + return -EINVAL; 1243 + } 1244 + 1245 + if (udata->inlen > sizeof(ucmd) && 1246 + !ib_is_udata_cleared(udata, sizeof(ucmd), 1247 + udata->inlen - sizeof(ucmd))) { 1248 + mlx5_ib_dbg(dev, "inlen is not supported\n"); 1249 + return -EOPNOTSUPP; 1250 + } 1251 + 1252 + if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { 1253 + mlx5_ib_dbg(dev, "copy failed\n"); 1254 + return -EFAULT; 1255 + } 1256 + 1257 + if (ucmd.comp_mask) { 1258 + mlx5_ib_dbg(dev, "invalid comp mask\n"); 1259 + return -EOPNOTSUPP; 1260 + } 1261 + 1262 + if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) { 1263 + mlx5_ib_dbg(dev, "invalid reserved\n"); 1264 + return -EOPNOTSUPP; 1265 + } 1266 + 1267 + err = ib_copy_to_udata(udata, &resp, min_resp_len); 1268 + if (err) { 1269 + mlx5_ib_dbg(dev, "copy failed\n"); 1270 + return -EINVAL; 1271 + } 1272 + 1273 + inlen = MLX5_ST_SZ_BYTES(create_tir_in); 1274 + in = mlx5_vzalloc(inlen); 1275 + if (!in) 1276 + return -ENOMEM; 1277 + 1278 + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 1279 + MLX5_SET(tirc, tirc, disp_type, 1280 + MLX5_TIRC_DISP_TYPE_INDIRECT); 1281 + MLX5_SET(tirc, tirc, indirect_table, 1282 + init_attr->rwq_ind_tbl->ind_tbl_num); 1283 + MLX5_SET(tirc, tirc, transport_domain, tdn); 1284 + 1285 + hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1286 + switch (ucmd.rx_hash_function) { 1287 + case MLX5_RX_HASH_FUNC_TOEPLITZ: 1288 + { 1289 + void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 1290 + size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); 1291 + 1292 + if (len != ucmd.rx_key_len) { 1293 + err = -EINVAL; 1294 + goto err; 1295 + } 1296 + 1297 + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); 1298 + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 1299 + memcpy(rss_key, ucmd.rx_hash_key, len); 1300 + break; 1301 + } 1302 + default: 1303 + err = -EOPNOTSUPP; 1304 + goto err; 1305 + } 1306 + 1307 + if (!ucmd.rx_hash_fields_mask) { 1308 + /* special case when this TIR serves as steering entry without hashing */ 1309 + if (!init_attr->rwq_ind_tbl->log_ind_tbl_size) 1310 + goto create_tir; 1311 + err = -EINVAL; 1312 + goto err; 1313 + } 1314 + 1315 + if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 1316 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) && 1317 + ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 1318 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) { 1319 + err = -EINVAL; 1320 + goto err; 1321 + } 1322 + 1323 + /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */ 1324 + if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 1325 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) 1326 + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 1327 + MLX5_L3_PROT_TYPE_IPV4); 1328 + else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 1329 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 1330 + MLX5_SET(rx_hash_field_select, 
hfso, l3_prot_type, 1331 + MLX5_L3_PROT_TYPE_IPV6); 1332 + 1333 + if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 1334 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) && 1335 + ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 1336 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) { 1337 + err = -EINVAL; 1338 + goto err; 1339 + } 1340 + 1341 + /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */ 1342 + if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 1343 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) 1344 + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 1345 + MLX5_L4_PROT_TYPE_TCP); 1346 + else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 1347 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 1348 + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 1349 + MLX5_L4_PROT_TYPE_UDP); 1350 + 1351 + if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 1352 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6)) 1353 + selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP; 1354 + 1355 + if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) || 1356 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 1357 + selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP; 1358 + 1359 + if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 1360 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP)) 1361 + selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT; 1362 + 1363 + if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) || 1364 + (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 1365 + selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT; 1366 + 1367 + MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); 1368 + 1369 + create_tir: 1370 + err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn); 1371 + 1372 + if (err) 1373 + goto err; 1374 + 1375 + kvfree(in); 1376 + /* qpn is reserved for that QP */ 1377 + qp->trans_qp.base.mqp.qpn = 0; 1378 + return 0; 1379 + 1380 + err: 1381 + kvfree(in); 1382 + return err; 1383 + } 1384 + 1278 1385 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, 1279 1386 struct ib_qp_init_attr *init_attr, 1280 1387 struct ib_udata *udata, struct mlx5_ib_qp *qp) ··· 1466 1211 struct mlx5_ib_create_qp_resp resp; 1467 1212 struct mlx5_create_qp_mbox_in *in; 1468 1213 struct mlx5_ib_create_qp ucmd; 1214 + struct mlx5_ib_cq *send_cq; 1215 + struct mlx5_ib_cq *recv_cq; 1216 + unsigned long flags; 1469 1217 int inlen = sizeof(*in); 1470 1218 int err; 1471 1219 u32 uidx = MLX5_IB_DEFAULT_UIDX; ··· 1484 1226 mutex_init(&qp->mutex); 1485 1227 spin_lock_init(&qp->sq.lock); 1486 1228 spin_lock_init(&qp->rq.lock); 1229 + 1230 + if (init_attr->rwq_ind_tbl) { 1231 + if (!udata) 1232 + return -ENOSYS; 1233 + 1234 + err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata); 1235 + return err; 1236 + } 1487 1237 1488 1238 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { 1489 1239 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) { ··· 1726 1460 base->container_mibqp = qp; 1727 1461 base->mqp.event = mlx5_ib_qp_event; 1728 1462 1463 + get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq, 1464 + &send_cq, &recv_cq); 1465 + spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 1466 + mlx5_ib_lock_cqs(send_cq, recv_cq); 1467 + /* Maintain device to QPs access, needed for further handling via reset 1468 + * flow 1469 + */ 1470 + list_add_tail(&qp->qps_list, &dev->qp_list); 1471 + /* Maintain CQ to QPs access, 
needed for further handling via reset flow 1472 + */ 1473 + if (send_cq) 1474 + list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 1475 + if (recv_cq) 1476 + list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 1477 + mlx5_ib_unlock_cqs(send_cq, recv_cq); 1478 + spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 1479 + 1729 1480 return 0; 1730 1481 1731 1482 err_create: ··· 1761 1478 if (send_cq) { 1762 1479 if (recv_cq) { 1763 1480 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 1764 - spin_lock_irq(&send_cq->lock); 1481 + spin_lock(&send_cq->lock); 1765 1482 spin_lock_nested(&recv_cq->lock, 1766 1483 SINGLE_DEPTH_NESTING); 1767 1484 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 1768 - spin_lock_irq(&send_cq->lock); 1485 + spin_lock(&send_cq->lock); 1769 1486 __acquire(&recv_cq->lock); 1770 1487 } else { 1771 - spin_lock_irq(&recv_cq->lock); 1488 + spin_lock(&recv_cq->lock); 1772 1489 spin_lock_nested(&send_cq->lock, 1773 1490 SINGLE_DEPTH_NESTING); 1774 1491 } 1775 1492 } else { 1776 - spin_lock_irq(&send_cq->lock); 1493 + spin_lock(&send_cq->lock); 1777 1494 __acquire(&recv_cq->lock); 1778 1495 } 1779 1496 } else if (recv_cq) { 1780 - spin_lock_irq(&recv_cq->lock); 1497 + spin_lock(&recv_cq->lock); 1781 1498 __acquire(&send_cq->lock); 1782 1499 } else { 1783 1500 __acquire(&send_cq->lock); ··· 1792 1509 if (recv_cq) { 1793 1510 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 1794 1511 spin_unlock(&recv_cq->lock); 1795 - spin_unlock_irq(&send_cq->lock); 1512 + spin_unlock(&send_cq->lock); 1796 1513 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 1797 1514 __release(&recv_cq->lock); 1798 - spin_unlock_irq(&send_cq->lock); 1515 + spin_unlock(&send_cq->lock); 1799 1516 } else { 1800 1517 spin_unlock(&send_cq->lock); 1801 - spin_unlock_irq(&recv_cq->lock); 1518 + spin_unlock(&recv_cq->lock); 1802 1519 } 1803 1520 } else { 1804 1521 __release(&recv_cq->lock); 1805 - spin_unlock_irq(&send_cq->lock); 1522 + spin_unlock(&send_cq->lock); 1806 1523 } 1807 1524 } else if (recv_cq) { 1808 1525 __release(&send_cq->lock); 1809 - spin_unlock_irq(&recv_cq->lock); 1526 + spin_unlock(&recv_cq->lock); 1810 1527 } else { 1811 1528 __release(&recv_cq->lock); 1812 1529 __release(&send_cq->lock); ··· 1818 1535 return to_mpd(qp->ibqp.pd); 1819 1536 } 1820 1537 1821 - static void get_cqs(struct mlx5_ib_qp *qp, 1538 + static void get_cqs(enum ib_qp_type qp_type, 1539 + struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 1822 1540 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) 1823 1541 { 1824 - switch (qp->ibqp.qp_type) { 1542 + switch (qp_type) { 1825 1543 case IB_QPT_XRC_TGT: 1826 1544 *send_cq = NULL; 1827 1545 *recv_cq = NULL; 1828 1546 break; 1829 1547 case MLX5_IB_QPT_REG_UMR: 1830 1548 case IB_QPT_XRC_INI: 1831 - *send_cq = to_mcq(qp->ibqp.send_cq); 1549 + *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 1832 1550 *recv_cq = NULL; 1833 1551 break; 1834 1552 ··· 1841 1557 case IB_QPT_RAW_IPV6: 1842 1558 case IB_QPT_RAW_ETHERTYPE: 1843 1559 case IB_QPT_RAW_PACKET: 1844 - *send_cq = to_mcq(qp->ibqp.send_cq); 1845 - *recv_cq = to_mcq(qp->ibqp.recv_cq); 1560 + *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 1561 + *recv_cq = ib_recv_cq ? 
to_mcq(ib_recv_cq) : NULL; 1846 1562 break; 1847 1563 1848 1564 case IB_QPT_MAX: ··· 1861 1577 struct mlx5_ib_cq *send_cq, *recv_cq; 1862 1578 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 1863 1579 struct mlx5_modify_qp_mbox_in *in; 1580 + unsigned long flags; 1864 1581 int err; 1582 + 1583 + if (qp->ibqp.rwq_ind_tbl) { 1584 + destroy_rss_raw_qp_tir(dev, qp); 1585 + return; 1586 + } 1865 1587 1866 1588 base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ? 1867 1589 &qp->raw_packet_qp.rq.base : ··· 1892 1602 base->mqp.qpn); 1893 1603 } 1894 1604 1895 - get_cqs(qp, &send_cq, &recv_cq); 1605 + get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 1606 + &send_cq, &recv_cq); 1607 + 1608 + spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 1609 + mlx5_ib_lock_cqs(send_cq, recv_cq); 1610 + /* del from lists under both locks above to protect reset flow paths */ 1611 + list_del(&qp->qps_list); 1612 + if (send_cq) 1613 + list_del(&qp->cq_send_list); 1614 + 1615 + if (recv_cq) 1616 + list_del(&qp->cq_recv_list); 1896 1617 1897 1618 if (qp->create_type == MLX5_QP_KERNEL) { 1898 - mlx5_ib_lock_cqs(send_cq, recv_cq); 1899 1619 __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 1900 1620 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1901 1621 if (send_cq != recv_cq) 1902 1622 __mlx5_ib_cq_clean(send_cq, base->mqp.qpn, 1903 1623 NULL); 1904 - mlx5_ib_unlock_cqs(send_cq, recv_cq); 1905 1624 } 1625 + mlx5_ib_unlock_cqs(send_cq, recv_cq); 1626 + spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 1906 1627 1907 1628 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { 1908 1629 destroy_raw_packet_qp(dev, qp); ··· 2601 2300 } 2602 2301 2603 2302 pd = get_pd(qp); 2604 - get_cqs(qp, &send_cq, &recv_cq); 2303 + get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 2304 + &send_cq, &recv_cq); 2605 2305 2606 2306 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); 2607 2307 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; ··· 2650 2348 sqd_event = 1; 2651 2349 else 2652 2350 sqd_event = 0; 2351 + 2352 + if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 2353 + u8 port_num = (attr_mask & IB_QP_PORT ? 
attr->port_num : 2354 + qp->port) - 1; 2355 + struct mlx5_ib_port *mibport = &dev->port[port_num]; 2356 + 2357 + context->qp_counter_set_usr_page |= 2358 + cpu_to_be32(mibport->q_cnt_id << 16); 2359 + } 2653 2360 2654 2361 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 2655 2362 context->sq_crq_size |= cpu_to_be16(1 << 4); ··· 2749 2438 int err = -EINVAL; 2750 2439 int port; 2751 2440 enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED; 2441 + 2442 + if (ibqp->rwq_ind_tbl) 2443 + return -ENOSYS; 2752 2444 2753 2445 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 2754 2446 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask); ··· 3710 3396 { 3711 3397 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 3712 3398 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3399 + struct mlx5_core_dev *mdev = dev->mdev; 3713 3400 struct mlx5_ib_qp *qp; 3714 3401 struct mlx5_ib_mr *mr; 3715 3402 struct mlx5_wqe_data_seg *dpseg; ··· 3737 3422 qend = qp->sq.qend; 3738 3423 3739 3424 spin_lock_irqsave(&qp->sq.lock, flags); 3425 + 3426 + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 3427 + err = -EIO; 3428 + *bad_wr = wr; 3429 + nreq = 0; 3430 + goto out; 3431 + } 3740 3432 3741 3433 for (nreq = 0; wr; nreq++, wr = wr->next) { 3742 3434 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { ··· 4046 3724 struct mlx5_ib_qp *qp = to_mqp(ibqp); 4047 3725 struct mlx5_wqe_data_seg *scat; 4048 3726 struct mlx5_rwqe_sig *sig; 3727 + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3728 + struct mlx5_core_dev *mdev = dev->mdev; 4049 3729 unsigned long flags; 4050 3730 int err = 0; 4051 3731 int nreq; ··· 4058 3734 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); 4059 3735 4060 3736 spin_lock_irqsave(&qp->rq.lock, flags); 3737 + 3738 + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 3739 + err = -EIO; 3740 + *bad_wr = wr; 3741 + nreq = 0; 3742 + goto out; 3743 + } 4061 3744 4062 3745 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 4063 3746 ··· 4385 4054 int err = 0; 4386 4055 u8 raw_packet_qp_state; 4387 4056 4057 + if (ibqp->rwq_ind_tbl) 4058 + return -ENOSYS; 4059 + 4388 4060 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 4389 4061 return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, 4390 4062 qp_init_attr); ··· 4496 4162 kfree(xrcd); 4497 4163 4498 4164 return 0; 4165 + } 4166 + 4167 + static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, 4168 + struct ib_wq_init_attr *init_attr) 4169 + { 4170 + struct mlx5_ib_dev *dev; 4171 + __be64 *rq_pas0; 4172 + void *in; 4173 + void *rqc; 4174 + void *wq; 4175 + int inlen; 4176 + int err; 4177 + 4178 + dev = to_mdev(pd->device); 4179 + 4180 + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; 4181 + in = mlx5_vzalloc(inlen); 4182 + if (!in) 4183 + return -ENOMEM; 4184 + 4185 + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 4186 + MLX5_SET(rqc, rqc, mem_rq_type, 4187 + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 4188 + MLX5_SET(rqc, rqc, user_index, rwq->user_index); 4189 + MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); 4190 + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 4191 + MLX5_SET(rqc, rqc, flush_in_error_en, 1); 4192 + wq = MLX5_ADDR_OF(rqc, rqc, wq); 4193 + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 4194 + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 4195 + MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride); 4196 + MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size); 4197 + MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn); 4198 + MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset); 
4199 + MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size); 4200 + MLX5_SET(wq, wq, wq_signature, rwq->wq_sig); 4201 + MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma); 4202 + rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 4203 + mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0); 4204 + err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn); 4205 + kvfree(in); 4206 + return err; 4207 + } 4208 + 4209 + static int set_user_rq_size(struct mlx5_ib_dev *dev, 4210 + struct ib_wq_init_attr *wq_init_attr, 4211 + struct mlx5_ib_create_wq *ucmd, 4212 + struct mlx5_ib_rwq *rwq) 4213 + { 4214 + /* Sanity check RQ size before proceeding */ 4215 + if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz))) 4216 + return -EINVAL; 4217 + 4218 + if (!ucmd->rq_wqe_count) 4219 + return -EINVAL; 4220 + 4221 + rwq->wqe_count = ucmd->rq_wqe_count; 4222 + rwq->wqe_shift = ucmd->rq_wqe_shift; 4223 + rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift); 4224 + rwq->log_rq_stride = rwq->wqe_shift; 4225 + rwq->log_rq_size = ilog2(rwq->wqe_count); 4226 + return 0; 4227 + } 4228 + 4229 + static int prepare_user_rq(struct ib_pd *pd, 4230 + struct ib_wq_init_attr *init_attr, 4231 + struct ib_udata *udata, 4232 + struct mlx5_ib_rwq *rwq) 4233 + { 4234 + struct mlx5_ib_dev *dev = to_mdev(pd->device); 4235 + struct mlx5_ib_create_wq ucmd = {}; 4236 + int err; 4237 + size_t required_cmd_sz; 4238 + 4239 + required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved); 4240 + if (udata->inlen < required_cmd_sz) { 4241 + mlx5_ib_dbg(dev, "invalid inlen\n"); 4242 + return -EINVAL; 4243 + } 4244 + 4245 + if (udata->inlen > sizeof(ucmd) && 4246 + !ib_is_udata_cleared(udata, sizeof(ucmd), 4247 + udata->inlen - sizeof(ucmd))) { 4248 + mlx5_ib_dbg(dev, "inlen is not supported\n"); 4249 + return -EOPNOTSUPP; 4250 + } 4251 + 4252 + if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { 4253 + mlx5_ib_dbg(dev, "copy failed\n"); 4254 + return -EFAULT; 4255 + } 4256 + 4257 + if (ucmd.comp_mask) { 4258 + mlx5_ib_dbg(dev, "invalid comp mask\n"); 4259 + return -EOPNOTSUPP; 4260 + } 4261 + 4262 + if (ucmd.reserved) { 4263 + mlx5_ib_dbg(dev, "invalid reserved\n"); 4264 + return -EOPNOTSUPP; 4265 + } 4266 + 4267 + err = set_user_rq_size(dev, init_attr, &ucmd, rwq); 4268 + if (err) { 4269 + mlx5_ib_dbg(dev, "err %d\n", err); 4270 + return err; 4271 + } 4272 + 4273 + err = create_user_rq(dev, pd, rwq, &ucmd); 4274 + if (err) { 4275 + mlx5_ib_dbg(dev, "err %d\n", err); 4276 + if (err) 4277 + return err; 4278 + } 4279 + 4280 + rwq->user_index = ucmd.user_index; 4281 + return 0; 4282 + } 4283 + 4284 + struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, 4285 + struct ib_wq_init_attr *init_attr, 4286 + struct ib_udata *udata) 4287 + { 4288 + struct mlx5_ib_dev *dev; 4289 + struct mlx5_ib_rwq *rwq; 4290 + struct mlx5_ib_create_wq_resp resp = {}; 4291 + size_t min_resp_len; 4292 + int err; 4293 + 4294 + if (!udata) 4295 + return ERR_PTR(-ENOSYS); 4296 + 4297 + min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 4298 + if (udata->outlen && udata->outlen < min_resp_len) 4299 + return ERR_PTR(-EINVAL); 4300 + 4301 + dev = to_mdev(pd->device); 4302 + switch (init_attr->wq_type) { 4303 + case IB_WQT_RQ: 4304 + rwq = kzalloc(sizeof(*rwq), GFP_KERNEL); 4305 + if (!rwq) 4306 + return ERR_PTR(-ENOMEM); 4307 + err = prepare_user_rq(pd, init_attr, udata, rwq); 4308 + if (err) 4309 + goto err; 4310 + err = create_rq(rwq, pd, init_attr); 4311 + if (err) 4312 + goto err_user_rq; 4313 + break; 
4314 + default: 4315 + mlx5_ib_dbg(dev, "unsupported wq type %d\n", 4316 + init_attr->wq_type); 4317 + return ERR_PTR(-EINVAL); 4318 + } 4319 + 4320 + rwq->ibwq.wq_num = rwq->rqn; 4321 + rwq->ibwq.state = IB_WQS_RESET; 4322 + if (udata->outlen) { 4323 + resp.response_length = offsetof(typeof(resp), response_length) + 4324 + sizeof(resp.response_length); 4325 + err = ib_copy_to_udata(udata, &resp, resp.response_length); 4326 + if (err) 4327 + goto err_copy; 4328 + } 4329 + 4330 + return &rwq->ibwq; 4331 + 4332 + err_copy: 4333 + mlx5_core_destroy_rq(dev->mdev, rwq->rqn); 4334 + err_user_rq: 4335 + destroy_user_rq(pd, rwq); 4336 + err: 4337 + kfree(rwq); 4338 + return ERR_PTR(err); 4339 + } 4340 + 4341 + int mlx5_ib_destroy_wq(struct ib_wq *wq) 4342 + { 4343 + struct mlx5_ib_dev *dev = to_mdev(wq->device); 4344 + struct mlx5_ib_rwq *rwq = to_mrwq(wq); 4345 + 4346 + mlx5_core_destroy_rq(dev->mdev, rwq->rqn); 4347 + destroy_user_rq(wq->pd, rwq); 4348 + kfree(rwq); 4349 + 4350 + return 0; 4351 + } 4352 + 4353 + struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, 4354 + struct ib_rwq_ind_table_init_attr *init_attr, 4355 + struct ib_udata *udata) 4356 + { 4357 + struct mlx5_ib_dev *dev = to_mdev(device); 4358 + struct mlx5_ib_rwq_ind_table *rwq_ind_tbl; 4359 + int sz = 1 << init_attr->log_ind_tbl_size; 4360 + struct mlx5_ib_create_rwq_ind_tbl_resp resp = {}; 4361 + size_t min_resp_len; 4362 + int inlen; 4363 + int err; 4364 + int i; 4365 + u32 *in; 4366 + void *rqtc; 4367 + 4368 + if (udata->inlen > 0 && 4369 + !ib_is_udata_cleared(udata, 0, 4370 + udata->inlen)) 4371 + return ERR_PTR(-EOPNOTSUPP); 4372 + 4373 + min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 4374 + if (udata->outlen && udata->outlen < min_resp_len) 4375 + return ERR_PTR(-EINVAL); 4376 + 4377 + rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL); 4378 + if (!rwq_ind_tbl) 4379 + return ERR_PTR(-ENOMEM); 4380 + 4381 + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 4382 + in = mlx5_vzalloc(inlen); 4383 + if (!in) { 4384 + err = -ENOMEM; 4385 + goto err; 4386 + } 4387 + 4388 + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 4389 + 4390 + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 4391 + MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 4392 + 4393 + for (i = 0; i < sz; i++) 4394 + MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num); 4395 + 4396 + err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn); 4397 + kvfree(in); 4398 + 4399 + if (err) 4400 + goto err; 4401 + 4402 + rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn; 4403 + if (udata->outlen) { 4404 + resp.response_length = offsetof(typeof(resp), response_length) + 4405 + sizeof(resp.response_length); 4406 + err = ib_copy_to_udata(udata, &resp, resp.response_length); 4407 + if (err) 4408 + goto err_copy; 4409 + } 4410 + 4411 + return &rwq_ind_tbl->ib_rwq_ind_tbl; 4412 + 4413 + err_copy: 4414 + mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn); 4415 + err: 4416 + kfree(rwq_ind_tbl); 4417 + return ERR_PTR(err); 4418 + } 4419 + 4420 + int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) 4421 + { 4422 + struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl); 4423 + struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device); 4424 + 4425 + mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn); 4426 + 4427 + kfree(rwq_ind_tbl); 4428 + return 0; 4429 + } 4430 + 4431 + int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 4432 + 
u32 wq_attr_mask, struct ib_udata *udata) 4433 + { 4434 + struct mlx5_ib_dev *dev = to_mdev(wq->device); 4435 + struct mlx5_ib_rwq *rwq = to_mrwq(wq); 4436 + struct mlx5_ib_modify_wq ucmd = {}; 4437 + size_t required_cmd_sz; 4438 + int curr_wq_state; 4439 + int wq_state; 4440 + int inlen; 4441 + int err; 4442 + void *rqc; 4443 + void *in; 4444 + 4445 + required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved); 4446 + if (udata->inlen < required_cmd_sz) 4447 + return -EINVAL; 4448 + 4449 + if (udata->inlen > sizeof(ucmd) && 4450 + !ib_is_udata_cleared(udata, sizeof(ucmd), 4451 + udata->inlen - sizeof(ucmd))) 4452 + return -EOPNOTSUPP; 4453 + 4454 + if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) 4455 + return -EFAULT; 4456 + 4457 + if (ucmd.comp_mask || ucmd.reserved) 4458 + return -EOPNOTSUPP; 4459 + 4460 + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 4461 + in = mlx5_vzalloc(inlen); 4462 + if (!in) 4463 + return -ENOMEM; 4464 + 4465 + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 4466 + 4467 + curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ? 4468 + wq_attr->curr_wq_state : wq->state; 4469 + wq_state = (wq_attr_mask & IB_WQ_STATE) ? 4470 + wq_attr->wq_state : curr_wq_state; 4471 + if (curr_wq_state == IB_WQS_ERR) 4472 + curr_wq_state = MLX5_RQC_STATE_ERR; 4473 + if (wq_state == IB_WQS_ERR) 4474 + wq_state = MLX5_RQC_STATE_ERR; 4475 + MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state); 4476 + MLX5_SET(rqc, rqc, state, wq_state); 4477 + 4478 + err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen); 4479 + kvfree(in); 4480 + if (!err) 4481 + rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; 4482 + 4483 + return err; 4499 4484 }
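Note on the hunk above: the new WQ entry points (prepare_user_rq(), mlx5_ib_create_wq(), mlx5_ib_modify_wq()) all parse their user commands the same way — compute the minimum acceptable command size with offsetof(), require any trailing bytes from a newer userspace to be zeroed via ib_is_udata_cleared(), copy only min(sizeof(cmd), inlen) bytes, and reject unknown comp_mask/reserved bits. A minimal sketch of that pattern, with a hypothetical example_cmd standing in for mlx5_ib_create_wq / mlx5_ib_modify_wq:

    #include <rdma/ib_verbs.h>

    /* Hypothetical command layout; the real structures are
     * mlx5_ib_create_wq and mlx5_ib_modify_wq in hw/mlx5/user.h. */
    struct example_cmd {
            __u64 buf_addr;
            __u32 comp_mask;
            __u32 reserved;
            /* new fields are only ever appended after 'reserved' */
    };

    static int example_parse_cmd(struct ib_udata *udata, struct example_cmd *cmd)
    {
            /* everything up to and including 'reserved' must be supplied */
            size_t required_cmd_sz = offsetof(struct example_cmd, reserved) +
                                     sizeof(cmd->reserved);

            if (udata->inlen < required_cmd_sz)
                    return -EINVAL;

            /* a larger (newer) command is accepted only if the extra bytes are zero */
            if (udata->inlen > sizeof(*cmd) &&
                !ib_is_udata_cleared(udata, sizeof(*cmd),
                                     udata->inlen - sizeof(*cmd)))
                    return -EOPNOTSUPP;

            memset(cmd, 0, sizeof(*cmd));
            if (ib_copy_from_udata(cmd, udata, min(sizeof(*cmd), udata->inlen)))
                    return -EFAULT;

            /* unknown extension bits are rejected, not silently ignored */
            if (cmd->comp_mask || cmd->reserved)
                    return -EOPNOTSUPP;

            return 0;
    }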
+49 -63
drivers/infiniband/hw/mlx5/srq.c
··· 74 74 } 75 75 76 76 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, 77 - struct mlx5_create_srq_mbox_in **in, 78 - struct ib_udata *udata, int buf_size, int *inlen, 79 - int is_xrc) 77 + struct mlx5_srq_attr *in, 78 + struct ib_udata *udata, int buf_size) 80 79 { 81 80 struct mlx5_ib_dev *dev = to_mdev(pd->device); 82 81 struct mlx5_ib_create_srq ucmd = {}; 83 82 size_t ucmdlen; 84 - void *xsrqc; 85 83 int err; 86 84 int npages; 87 85 int page_shift; ··· 102 104 udata->inlen - sizeof(ucmd))) 103 105 return -EINVAL; 104 106 105 - if (is_xrc) { 107 + if (in->type == IB_SRQT_XRC) { 106 108 err = get_srq_user_index(to_mucontext(pd->uobject->context), 107 109 &ucmd, udata->inlen, &uidx); 108 110 if (err) ··· 128 130 goto err_umem; 129 131 } 130 132 131 - *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; 132 - *in = mlx5_vzalloc(*inlen); 133 - if (!(*in)) { 133 + in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont); 134 + if (!in->pas) { 134 135 err = -ENOMEM; 135 136 goto err_umem; 136 137 } 137 138 138 - mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); 139 + mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0); 139 140 140 141 err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context), 141 142 ucmd.db_addr, &srq->db); ··· 143 146 goto err_in; 144 147 } 145 148 146 - (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 147 - (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); 148 - 149 - if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) && 150 - is_xrc){ 151 - xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 152 - xrc_srq_context_entry); 153 - MLX5_SET(xrc_srqc, xsrqc, user_index, uidx); 154 - } 149 + in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 150 + in->page_offset = offset; 151 + if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 && 152 + in->type == IB_SRQT_XRC) 153 + in->user_index = uidx; 155 154 156 155 return 0; 157 156 158 157 err_in: 159 - kvfree(*in); 158 + kvfree(in->pas); 160 159 161 160 err_umem: 162 161 ib_umem_release(srq->umem); ··· 161 168 } 162 169 163 170 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, 164 - struct mlx5_create_srq_mbox_in **in, int buf_size, 165 - int *inlen, int is_xrc) 171 + struct mlx5_srq_attr *in, int buf_size) 166 172 { 167 173 int err; 168 174 int i; 169 175 struct mlx5_wqe_srq_next_seg *next; 170 176 int page_shift; 171 177 int npages; 172 - void *xsrqc; 173 178 174 179 err = mlx5_db_alloc(dev->mdev, &srq->db); 175 180 if (err) { ··· 195 204 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); 196 205 mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n", 197 206 buf_size, page_shift, srq->buf.npages, npages); 198 - *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; 199 - *in = mlx5_vzalloc(*inlen); 200 - if (!*in) { 207 + in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages); 208 + if (!in->pas) { 201 209 err = -ENOMEM; 202 210 goto err_buf; 203 211 } 204 - mlx5_fill_page_array(&srq->buf, (*in)->pas); 212 + mlx5_fill_page_array(&srq->buf, in->pas); 205 213 206 214 srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL); 207 215 if (!srq->wrid) { ··· 211 221 } 212 222 srq->wq_sig = !!srq_signature; 213 223 214 - (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 215 - 216 - if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) && 217 - is_xrc){ 218 - xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 219 - xrc_srq_context_entry); 220 - /* 0xffffff means we ask to work 
with cqe version 0 */ 221 - MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX); 222 - } 224 + in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 225 + if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 && 226 + in->type == IB_SRQT_XRC) 227 + in->user_index = MLX5_IB_DEFAULT_UIDX; 223 228 224 229 return 0; 225 230 226 231 err_in: 227 - kvfree(*in); 232 + kvfree(in->pas); 228 233 229 234 err_buf: 230 235 mlx5_buf_free(dev->mdev, &srq->buf); ··· 252 267 int desc_size; 253 268 int buf_size; 254 269 int err; 255 - struct mlx5_create_srq_mbox_in *uninitialized_var(in); 256 - int uninitialized_var(inlen); 257 - int is_xrc; 258 - u32 flgs, xrcdn; 270 + struct mlx5_srq_attr in = {0}; 259 271 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 260 272 261 273 /* Sanity check SRQ size before proceeding */ ··· 284 302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 285 303 srq->msrq.max_avail_gather); 286 304 287 - is_xrc = (init_attr->srq_type == IB_SRQT_XRC); 288 - 289 305 if (pd->uobject) 290 - err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen, 291 - is_xrc); 306 + err = create_srq_user(pd, srq, &in, udata, buf_size); 292 307 else 293 - err = create_srq_kernel(dev, srq, &in, buf_size, &inlen, 294 - is_xrc); 308 + err = create_srq_kernel(dev, srq, &in, buf_size); 295 309 296 310 if (err) { 297 311 mlx5_ib_warn(dev, "create srq %s failed, err %d\n", ··· 295 317 goto err_srq; 296 318 } 297 319 298 - in->ctx.state_log_sz = ilog2(srq->msrq.max); 299 - flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; 300 - xrcdn = 0; 301 - if (is_xrc) { 302 - xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; 303 - in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn); 320 + in.type = init_attr->srq_type; 321 + in.log_size = ilog2(srq->msrq.max); 322 + in.wqe_shift = srq->msrq.wqe_shift - 4; 323 + if (srq->wq_sig) 324 + in.flags |= MLX5_SRQ_FLAG_WQ_SIG; 325 + if (init_attr->srq_type == IB_SRQT_XRC) { 326 + in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; 327 + in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn; 304 328 } else if (init_attr->srq_type == IB_SRQT_BASIC) { 305 - xrcdn = to_mxrcd(dev->devr.x0)->xrcdn; 306 - in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn); 329 + in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn; 330 + in.cqn = to_mcq(dev->devr.c0)->mcq.cqn; 307 331 } 308 332 309 - in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF)); 310 - 311 - in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); 312 - in->ctx.db_record = cpu_to_be64(srq->db.dma); 313 - err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc); 314 - kvfree(in); 333 + in.pd = to_mpd(pd)->pdn; 334 + in.db_record = srq->db.dma; 335 + err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in); 336 + kvfree(in.pas); 315 337 if (err) { 316 338 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); 317 339 goto err_usr_kern_srq; ··· 379 401 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); 380 402 struct mlx5_ib_srq *srq = to_msrq(ibsrq); 381 403 int ret; 382 - struct mlx5_query_srq_mbox_out *out; 404 + struct mlx5_srq_attr *out; 383 405 384 406 out = kzalloc(sizeof(*out), GFP_KERNEL); 385 407 if (!out) ··· 389 411 if (ret) 390 412 goto out_box; 391 413 392 - srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm); 414 + srq_attr->srq_limit = out->lwm; 393 415 srq_attr->max_wr = srq->msrq.max - 1; 394 416 srq_attr->max_sge = srq->msrq.max_gs; 395 417 ··· 436 458 struct mlx5_ib_srq *srq = to_msrq(ibsrq); 437 459 
struct mlx5_wqe_srq_next_seg *next; 438 460 struct mlx5_wqe_data_seg *scat; 461 + struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); 462 + struct mlx5_core_dev *mdev = dev->mdev; 439 463 unsigned long flags; 440 464 int err = 0; 441 465 int nreq; 442 466 int i; 443 467 444 468 spin_lock_irqsave(&srq->lock, flags); 469 + 470 + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 471 + err = -EIO; 472 + *bad_wr = wr; 473 + goto out; 474 + } 445 475 446 476 for (nreq = 0; wr; nreq++, wr = wr->next) { 447 477 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { ··· 493 507 494 508 *srq->db.db = cpu_to_be32(srq->wqe_ctr); 495 509 } 496 - 510 + out: 497 511 spin_unlock_irqrestore(&srq->lock, flags); 498 512 499 513 return err;
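With the mailbox structures gone, mlx5_ib_create_srq() now describes the SRQ to mlx5_core through the vendor-neutral struct mlx5_srq_attr, and mlx5_core (see the core/srq.c hunk further down) decides whether that becomes a firmware SRQ, XRC SRQ or RMP. A condensed restatement of the caller side from the hunk above, assuming create_srq_user()/create_srq_kernel() already filled attr.pas, and with cqn/xrcdn standing in for the XRC and basic cases:

    struct mlx5_srq_attr attr = {0};

    attr.type      = init_attr->srq_type;      /* IB_SRQT_BASIC or IB_SRQT_XRC */
    attr.log_size  = ilog2(srq->msrq.max);
    attr.wqe_shift = srq->msrq.wqe_shift - 4;
    if (srq->wq_sig)
            attr.flags |= MLX5_SRQ_FLAG_WQ_SIG;
    attr.pd        = to_mpd(pd)->pdn;
    attr.cqn       = cqn;                      /* XRC CQ, or the driver's c0 CQ */
    attr.xrcd      = xrcdn;                    /* XRC xrcd, or the driver's x0 */
    attr.db_record = srq->db.dma;
    /* attr.pas was populated by create_srq_user()/create_srq_kernel() */

    err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &attr);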
+86 -2
drivers/infiniband/hw/mlx5/user.h
··· 46 46 MLX5_SRQ_FLAG_SIGNATURE = 1 << 0, 47 47 }; 48 48 49 + enum { 50 + MLX5_WQ_FLAG_SIGNATURE = 1 << 0, 51 + }; 52 + 49 53 50 54 /* Increment this value if any changes that break userspace ABI 51 55 * compatibility are made. ··· 83 79 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0, 84 80 }; 85 81 82 + enum mlx5_user_cmds_supp_uhw { 83 + MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, 84 + }; 85 + 86 86 struct mlx5_ib_alloc_ucontext_resp { 87 87 __u32 qp_tab_size; 88 88 __u32 bf_reg_size; ··· 102 94 __u32 comp_mask; 103 95 __u32 response_length; 104 96 __u8 cqe_version; 105 - __u8 reserved2; 106 - __u16 reserved3; 97 + __u8 cmds_supp_uhw; 98 + __u16 reserved2; 107 99 __u64 hca_core_clock_offset; 108 100 }; 109 101 110 102 struct mlx5_ib_alloc_pd_resp { 111 103 __u32 pdn; 104 + }; 105 + 106 + struct mlx5_ib_tso_caps { 107 + __u32 max_tso; /* Maximum tso payload size in bytes */ 108 + 109 + /* Corresponding bit will be set if qp type from 110 + * 'enum ib_qp_type' is supported, e.g. 111 + * supported_qpts |= 1 << IB_QPT_UD 112 + */ 113 + __u32 supported_qpts; 114 + }; 115 + 116 + struct mlx5_ib_query_device_resp { 117 + __u32 comp_mask; 118 + __u32 response_length; 119 + struct mlx5_ib_tso_caps tso_caps; 112 120 }; 113 121 114 122 struct mlx5_ib_create_cq { ··· 172 148 __u64 sq_buf_addr; 173 149 }; 174 150 151 + /* RX Hash function flags */ 152 + enum mlx5_rx_hash_function_flags { 153 + MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0, 154 + }; 155 + 156 + /* 157 + * RX Hash flags, these flags allows to set which incoming packet's field should 158 + * participates in RX Hash. Each flag represent certain packet's field, 159 + * when the flag is set the field that is represented by the flag will 160 + * participate in RX Hash calculation. 161 + * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP 162 + * and *TCP and *UDP flags can't be enabled together on the same QP. 163 + */ 164 + enum mlx5_rx_hash_fields { 165 + MLX5_RX_HASH_SRC_IPV4 = 1 << 0, 166 + MLX5_RX_HASH_DST_IPV4 = 1 << 1, 167 + MLX5_RX_HASH_SRC_IPV6 = 1 << 2, 168 + MLX5_RX_HASH_DST_IPV6 = 1 << 3, 169 + MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4, 170 + MLX5_RX_HASH_DST_PORT_TCP = 1 << 5, 171 + MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6, 172 + MLX5_RX_HASH_DST_PORT_UDP = 1 << 7 173 + }; 174 + 175 + struct mlx5_ib_create_qp_rss { 176 + __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ 177 + __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ 178 + __u8 rx_key_len; /* valid only for Toeplitz */ 179 + __u8 reserved[6]; 180 + __u8 rx_hash_key[128]; /* valid only for Toeplitz */ 181 + __u32 comp_mask; 182 + __u32 reserved1; 183 + }; 184 + 175 185 struct mlx5_ib_create_qp_resp { 176 186 __u32 uuar_index; 177 187 }; ··· 215 157 __u8 num_klms; 216 158 __u8 reserved1; 217 159 __u16 reserved2; 160 + }; 161 + 162 + struct mlx5_ib_create_wq { 163 + __u64 buf_addr; 164 + __u64 db_addr; 165 + __u32 rq_wqe_count; 166 + __u32 rq_wqe_shift; 167 + __u32 user_index; 168 + __u32 flags; 169 + __u32 comp_mask; 170 + __u32 reserved; 171 + }; 172 + 173 + struct mlx5_ib_create_wq_resp { 174 + __u32 response_length; 175 + __u32 reserved; 176 + }; 177 + 178 + struct mlx5_ib_create_rwq_ind_tbl_resp { 179 + __u32 response_length; 180 + __u32 reserved; 181 + }; 182 + 183 + struct mlx5_ib_modify_wq { 184 + __u32 comp_mask; 185 + __u32 reserved; 218 186 }; 219 187 220 188 static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
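mlx5_ib_create_qp_rss is the userspace half of the new RSS raw QP: the provider library supplies the hash function, key and field mask, with comp_mask/reserved following the usual extension rules. A userspace-side sketch of filling it for Toeplitz hashing over the IPv4/TCP 4-tuple — remember the IPv4/IPv6 and TCP/UDP bits may not be mixed on one QP; how the struct is then carried into the kernel (the provider library's extended create_qp path) is outside this hunk:

    #include <string.h>

    /* key is assumed to be a Toeplitz key of key_len bytes (key_len <= 128) */
    static void fill_rss_cmd(struct mlx5_ib_create_qp_rss *rss,
                             const unsigned char *key, unsigned char key_len)
    {
            memset(rss, 0, sizeof(*rss));

            /* IPv4 vs IPv6 and TCP vs UDP are mutually exclusive per QP */
            rss->rx_hash_fields_mask = MLX5_RX_HASH_SRC_IPV4 |
                                       MLX5_RX_HASH_DST_IPV4 |
                                       MLX5_RX_HASH_SRC_PORT_TCP |
                                       MLX5_RX_HASH_DST_PORT_TCP;

            rss->rx_hash_function = MLX5_RX_HASH_FUNC_TOEPLITZ;
            rss->rx_key_len = key_len;          /* meaningful for Toeplitz only */
            memcpy(rss->rx_hash_key, key, key_len);
    }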
+12 -12
drivers/infiniband/hw/mthca/mthca_provider.c
··· 1081 1081 return sprintf(buf, "%x\n", dev->rev_id); 1082 1082 } 1083 1083 1084 - static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 1085 - char *buf) 1086 - { 1087 - struct mthca_dev *dev = 1088 - container_of(device, struct mthca_dev, ib_dev.dev); 1089 - return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32), 1090 - (int) (dev->fw_ver >> 16) & 0xffff, 1091 - (int) dev->fw_ver & 0xffff); 1092 - } 1093 - 1094 1084 static ssize_t show_hca(struct device *device, struct device_attribute *attr, 1095 1085 char *buf) 1096 1086 { ··· 1110 1120 } 1111 1121 1112 1122 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 1113 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 1114 1123 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 1115 1124 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 1116 1125 1117 1126 static struct device_attribute *mthca_dev_attributes[] = { 1118 1127 &dev_attr_hw_rev, 1119 - &dev_attr_fw_ver, 1120 1128 &dev_attr_hca_type, 1121 1129 &dev_attr_board_id 1122 1130 }; ··· 1173 1185 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 1174 1186 1175 1187 return 0; 1188 + } 1189 + 1190 + static void get_dev_fw_str(struct ib_device *device, char *str, 1191 + size_t str_len) 1192 + { 1193 + struct mthca_dev *dev = 1194 + container_of(device, struct mthca_dev, ib_dev); 1195 + snprintf(str, str_len, "%d.%d.%d", 1196 + (int) (dev->fw_ver >> 32), 1197 + (int) (dev->fw_ver >> 16) & 0xffff, 1198 + (int) dev->fw_ver & 0xffff); 1176 1199 } 1177 1200 1178 1201 int mthca_register_device(struct mthca_dev *dev) ··· 1265 1266 dev->ib_dev.reg_user_mr = mthca_reg_user_mr; 1266 1267 dev->ib_dev.dereg_mr = mthca_dereg_mr; 1267 1268 dev->ib_dev.get_port_immutable = mthca_port_immutable; 1269 + dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 1268 1270 1269 1271 if (dev->mthca_flags & MTHCA_FLAG_FMR) { 1270 1272 dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
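mthca is the first of several providers in this series (nes, ocrdma and usnic follow the same shape below) to drop its driver-private fw_ver sysfs attribute and instead report the firmware string through the new ib_device->get_dev_fw_str callback, which is also what ULPs such as ipoib consume via ib_get_device_fw_str() in the ipoib_ethtool.c hunk further down. A minimal sketch of the conversion for a hypothetical driver "foo" (the device struct and version fields are illustrative):

    struct foo_dev {
            struct ib_device ibdev;
            u32 fw_major;
            u32 fw_minor;
    };

    static void foo_get_dev_fw_str(struct ib_device *device, char *str,
                                   size_t str_len)
    {
            struct foo_dev *dev = container_of(device, struct foo_dev, ibdev);

            snprintf(str, str_len, "%u.%u", dev->fw_major, dev->fw_minor);
    }

    /* at registration time, instead of a driver-private fw_ver DEVICE_ATTR: */
    /*      dev->ibdev.get_dev_fw_str = foo_get_dev_fw_str; */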
+14 -19
drivers/infiniband/hw/nes/nes_verbs.c
··· 2606 2606 2607 2607 2608 2608 /** 2609 - * show_fw_ver 2610 - */ 2611 - static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, 2612 - char *buf) 2613 - { 2614 - struct nes_ib_device *nesibdev = 2615 - container_of(dev, struct nes_ib_device, ibdev.dev); 2616 - struct nes_vnic *nesvnic = nesibdev->nesvnic; 2617 - 2618 - nes_debug(NES_DBG_INIT, "\n"); 2619 - return sprintf(buf, "%u.%u\n", 2620 - (nesvnic->nesdev->nesadapter->firmware_version >> 16), 2621 - (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff)); 2622 - } 2623 - 2624 - 2625 - /** 2626 2609 * show_hca 2627 2610 */ 2628 2611 static ssize_t show_hca(struct device *dev, struct device_attribute *attr, ··· 2628 2645 2629 2646 2630 2647 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2631 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 2632 2648 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2633 2649 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2634 2650 2635 2651 static struct device_attribute *nes_dev_attributes[] = { 2636 2652 &dev_attr_hw_rev, 2637 - &dev_attr_fw_ver, 2638 2653 &dev_attr_hca_type, 2639 2654 &dev_attr_board_id 2640 2655 }; ··· 3684 3703 return 0; 3685 3704 } 3686 3705 3706 + static void get_dev_fw_str(struct ib_device *dev, char *str, 3707 + size_t str_len) 3708 + { 3709 + struct nes_ib_device *nesibdev = 3710 + container_of(dev, struct nes_ib_device, ibdev); 3711 + struct nes_vnic *nesvnic = nesibdev->nesvnic; 3712 + 3713 + nes_debug(NES_DBG_INIT, "\n"); 3714 + snprintf(str, str_len, "%u.%u", 3715 + (nesvnic->nesdev->nesadapter->firmware_version >> 16), 3716 + (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff)); 3717 + } 3718 + 3687 3719 /** 3688 3720 * nes_init_ofa_device 3689 3721 */ ··· 3796 3802 nesibdev->ibdev.iwcm->create_listen = nes_create_listen; 3797 3803 nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen; 3798 3804 nesibdev->ibdev.get_port_immutable = nes_port_immutable; 3805 + nesibdev->ibdev.get_dev_fw_str = get_dev_fw_str; 3799 3806 memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name, 3800 3807 sizeof(nesibdev->ibdev.iwcm->ifname)); 3801 3808
+9 -10
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 107 107 return 0; 108 108 } 109 109 110 + static void get_dev_fw_str(struct ib_device *device, char *str, 111 + size_t str_len) 112 + { 113 + struct ocrdma_dev *dev = get_ocrdma_dev(device); 114 + 115 + snprintf(str, str_len, "%s", &dev->attr.fw_ver[0]); 116 + } 117 + 110 118 static int ocrdma_register_device(struct ocrdma_dev *dev) 111 119 { 112 120 strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX); ··· 201 193 202 194 dev->ibdev.process_mad = ocrdma_process_mad; 203 195 dev->ibdev.get_port_immutable = ocrdma_port_immutable; 196 + dev->ibdev.get_dev_fw_str = get_dev_fw_str; 204 197 205 198 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 206 199 dev->ibdev.uverbs_cmd_mask |= ··· 271 262 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor); 272 263 } 273 264 274 - static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 275 - char *buf) 276 - { 277 - struct ocrdma_dev *dev = dev_get_drvdata(device); 278 - 279 - return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]); 280 - } 281 - 282 265 static ssize_t show_hca_type(struct device *device, 283 266 struct device_attribute *attr, char *buf) 284 267 { ··· 280 279 } 281 280 282 281 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 283 - static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 284 282 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL); 285 283 286 284 static struct device_attribute *ocrdma_attributes[] = { 287 285 &dev_attr_hw_rev, 288 - &dev_attr_fw_ver, 289 286 &dev_attr_hca_type 290 287 }; 291 288
+16
drivers/infiniband/hw/usnic/usnic_ib_main.c
··· 331 331 return 0; 332 332 } 333 333 334 + static void usnic_get_dev_fw_str(struct ib_device *device, 335 + char *str, 336 + size_t str_len) 337 + { 338 + struct usnic_ib_dev *us_ibdev = 339 + container_of(device, struct usnic_ib_dev, ib_dev); 340 + struct ethtool_drvinfo info; 341 + 342 + mutex_lock(&us_ibdev->usdev_lock); 343 + us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); 344 + mutex_unlock(&us_ibdev->usdev_lock); 345 + 346 + snprintf(str, str_len, "%s", info.fw_version); 347 + } 348 + 334 349 /* Start of PF discovery section */ 335 350 static void *usnic_ib_device_add(struct pci_dev *dev) 336 351 { ··· 429 414 us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq; 430 415 us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr; 431 416 us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable; 417 + us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str; 432 418 433 419 434 420 if (ib_register_device(&us_ibdev->ib_dev, NULL))
-17
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
··· 45 45 #include "usnic_ib_verbs.h" 46 46 #include "usnic_log.h" 47 47 48 - static ssize_t usnic_ib_show_fw_ver(struct device *device, 49 - struct device_attribute *attr, 50 - char *buf) 51 - { 52 - struct usnic_ib_dev *us_ibdev = 53 - container_of(device, struct usnic_ib_dev, ib_dev.dev); 54 - struct ethtool_drvinfo info; 55 - 56 - mutex_lock(&us_ibdev->usdev_lock); 57 - us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); 58 - mutex_unlock(&us_ibdev->usdev_lock); 59 - 60 - return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version); 61 - } 62 - 63 48 static ssize_t usnic_ib_show_board(struct device *device, 64 49 struct device_attribute *attr, 65 50 char *buf) ··· 177 192 us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]); 178 193 } 179 194 180 - static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL); 181 195 static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL); 182 196 static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL); 183 197 static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL); ··· 185 201 static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL); 186 202 187 203 static struct device_attribute *usnic_class_attributes[] = { 188 - &dev_attr_fw_ver, 189 204 &dev_attr_board_id, 190 205 &dev_attr_config, 191 206 &dev_attr_iface,
+2 -4
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
··· 62 62 { 63 63 struct ipoib_dev_priv *priv = netdev_priv(netdev); 64 64 65 - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 66 - "%d.%d.%d", (int)(priv->ca->attrs.fw_ver >> 32), 67 - (int)(priv->ca->attrs.fw_ver >> 16) & 0xffff, 68 - (int)priv->ca->attrs.fw_ver & 0xffff); 65 + ib_get_device_fw_str(priv->ca, drvinfo->fw_version, 66 + sizeof(drvinfo->fw_version)); 69 67 70 68 strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device), 71 69 sizeof(drvinfo->bus_info));
+2
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
··· 104 104 105 105 enum CPL_error { 106 106 CPL_ERR_NONE = 0, 107 + CPL_ERR_TCAM_PARITY = 1, 108 + CPL_ERR_TCAM_MISS = 2, 107 109 CPL_ERR_TCAM_FULL = 3, 108 110 CPL_ERR_BAD_LENGTH = 15, 109 111 CPL_ERR_BAD_ROUTE = 18,
+144 -115
drivers/net/ethernet/mellanox/mlx5/core/srq.c
··· 63 63 complete(&srq->free); 64 64 } 65 65 66 - static int get_pas_size(void *srqc) 66 + static int get_pas_size(struct mlx5_srq_attr *in) 67 67 { 68 - u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12; 69 - u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size); 70 - u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride); 71 - u32 page_offset = MLX5_GET(srqc, srqc, page_offset); 68 + u32 log_page_size = in->log_page_size + 12; 69 + u32 log_srq_size = in->log_size; 70 + u32 log_rq_stride = in->wqe_shift; 71 + u32 page_offset = in->page_offset; 72 72 u32 po_quanta = 1 << (log_page_size - 6); 73 73 u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); 74 74 u32 page_size = 1 << log_page_size; ··· 78 78 return rq_num_pas * sizeof(u64); 79 79 } 80 80 81 - static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc) 81 + static void set_wq(void *wq, struct mlx5_srq_attr *in) 82 82 { 83 - void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); 83 + MLX5_SET(wq, wq, wq_signature, !!(in->flags 84 + & MLX5_SRQ_FLAG_WQ_SIG)); 85 + MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size); 86 + MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4); 87 + MLX5_SET(wq, wq, log_wq_sz, in->log_size); 88 + MLX5_SET(wq, wq, page_offset, in->page_offset); 89 + MLX5_SET(wq, wq, lwm, in->lwm); 90 + MLX5_SET(wq, wq, pd, in->pd); 91 + MLX5_SET64(wq, wq, dbr_addr, in->db_record); 92 + } 84 93 85 - if (srqc_to_rmpc) { 86 - switch (MLX5_GET(srqc, srqc, state)) { 87 - case MLX5_SRQC_STATE_GOOD: 88 - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); 89 - break; 90 - case MLX5_SRQC_STATE_ERROR: 91 - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR); 92 - break; 93 - default: 94 - pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__, 95 - __LINE__, MLX5_GET(srqc, srqc, state)); 96 - MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state)); 97 - } 94 + static void set_srqc(void *srqc, struct mlx5_srq_attr *in) 95 + { 96 + MLX5_SET(srqc, srqc, wq_signature, !!(in->flags 97 + & MLX5_SRQ_FLAG_WQ_SIG)); 98 + MLX5_SET(srqc, srqc, log_page_size, in->log_page_size); 99 + MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift); 100 + MLX5_SET(srqc, srqc, log_srq_size, in->log_size); 101 + MLX5_SET(srqc, srqc, page_offset, in->page_offset); 102 + MLX5_SET(srqc, srqc, lwm, in->lwm); 103 + MLX5_SET(srqc, srqc, pd, in->pd); 104 + MLX5_SET64(srqc, srqc, dbr_addr, in->db_record); 105 + MLX5_SET(srqc, srqc, xrcd, in->xrcd); 106 + MLX5_SET(srqc, srqc, cqn, in->cqn); 107 + } 98 108 99 - MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); 100 - MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); 101 - MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); 102 - MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size)); 103 - MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); 104 - MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); 105 - MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); 106 - MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr)); 107 - } else { 108 - switch (MLX5_GET(rmpc, rmpc, state)) { 109 - case MLX5_RMPC_STATE_RDY: 110 - MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD); 111 - break; 112 - case MLX5_RMPC_STATE_ERR: 113 - MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR); 114 - break; 115 - default: 116 - pr_warn("%s: %d: Unknown rmp state = 0x%x\n", 117 - __func__, __LINE__, 118 - MLX5_GET(rmpc, rmpc, state)); 119 - MLX5_SET(srqc, srqc, state, 120 - MLX5_GET(rmpc, rmpc, state)); 121 - } 109 + static void get_wq(void *wq, struct mlx5_srq_attr 
*in) 110 + { 111 + if (MLX5_GET(wq, wq, wq_signature)) 112 + in->flags &= MLX5_SRQ_FLAG_WQ_SIG; 113 + in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz); 114 + in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4; 115 + in->log_size = MLX5_GET(wq, wq, log_wq_sz); 116 + in->page_offset = MLX5_GET(wq, wq, page_offset); 117 + in->lwm = MLX5_GET(wq, wq, lwm); 118 + in->pd = MLX5_GET(wq, wq, pd); 119 + in->db_record = MLX5_GET64(wq, wq, dbr_addr); 120 + } 122 121 123 - MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); 124 - MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz)); 125 - MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4); 126 - MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz)); 127 - MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); 128 - MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm)); 129 - MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd)); 130 - MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr)); 131 - } 122 + static void get_srqc(void *srqc, struct mlx5_srq_attr *in) 123 + { 124 + if (MLX5_GET(srqc, srqc, wq_signature)) 125 + in->flags &= MLX5_SRQ_FLAG_WQ_SIG; 126 + in->log_page_size = MLX5_GET(srqc, srqc, log_page_size); 127 + in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride); 128 + in->log_size = MLX5_GET(srqc, srqc, log_srq_size); 129 + in->page_offset = MLX5_GET(srqc, srqc, page_offset); 130 + in->lwm = MLX5_GET(srqc, srqc, lwm); 131 + in->pd = MLX5_GET(srqc, srqc, pd); 132 + in->db_record = MLX5_GET64(srqc, srqc, dbr_addr); 132 133 } 133 134 134 135 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) ··· 150 149 EXPORT_SYMBOL(mlx5_core_get_srq); 151 150 152 151 static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 153 - struct mlx5_create_srq_mbox_in *in, int inlen) 152 + struct mlx5_srq_attr *in) 154 153 { 155 - struct mlx5_create_srq_mbox_out out; 154 + u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0}; 155 + void *create_in; 156 + void *srqc; 157 + void *pas; 158 + int pas_size; 159 + int inlen; 156 160 int err; 157 161 158 - memset(&out, 0, sizeof(out)); 162 + pas_size = get_pas_size(in); 163 + inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size; 164 + create_in = mlx5_vzalloc(inlen); 165 + if (!create_in) 166 + return -ENOMEM; 159 167 160 - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); 168 + srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); 169 + pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); 161 170 162 - err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), 163 - sizeof(out)); 171 + set_srqc(srqc, in); 172 + memcpy(pas, in->pas, pas_size); 164 173 165 - srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; 174 + MLX5_SET(create_srq_in, create_in, opcode, 175 + MLX5_CMD_OP_CREATE_SRQ); 176 + 177 + err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, 178 + sizeof(create_out)); 179 + kvfree(create_in); 180 + if (!err) 181 + srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); 166 182 167 183 return err; 168 184 } ··· 187 169 static int destroy_srq_cmd(struct mlx5_core_dev *dev, 188 170 struct mlx5_core_srq *srq) 189 171 { 190 - struct mlx5_destroy_srq_mbox_in in; 191 - struct mlx5_destroy_srq_mbox_out out; 172 + u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0}; 173 + u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0}; 192 174 193 - memset(&in, 0, sizeof(in)); 194 - memset(&out, 0, sizeof(out)); 195 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); 196 - in.srqn = 
cpu_to_be32(srq->srqn); 175 + MLX5_SET(destroy_srq_in, srq_in, opcode, 176 + MLX5_CMD_OP_DESTROY_SRQ); 177 + MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); 197 178 198 - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), 199 - (u32 *)(&out), sizeof(out)); 179 + return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), 180 + srq_out, sizeof(srq_out)); 200 181 } 201 182 202 183 static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 203 184 u16 lwm, int is_srq) 204 185 { 205 - struct mlx5_arm_srq_mbox_in in; 206 - struct mlx5_arm_srq_mbox_out out; 186 + /* arm_srq structs missing using identical xrc ones */ 187 + u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; 188 + u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; 207 189 208 - memset(&in, 0, sizeof(in)); 209 - memset(&out, 0, sizeof(out)); 190 + MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); 191 + MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); 192 + MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); 210 193 211 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); 212 - in.hdr.opmod = cpu_to_be16(!!is_srq); 213 - in.srqn = cpu_to_be32(srq->srqn); 214 - in.lwm = cpu_to_be16(lwm); 215 - 216 - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), 217 - sizeof(in), (u32 *)(&out), 218 - sizeof(out)); 194 + return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), 195 + srq_out, sizeof(srq_out)); 219 196 } 220 197 221 198 static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 222 - struct mlx5_query_srq_mbox_out *out) 199 + struct mlx5_srq_attr *out) 223 200 { 224 - struct mlx5_query_srq_mbox_in in; 201 + u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0}; 202 + u32 *srq_out; 203 + void *srqc; 204 + int err; 225 205 226 - memset(&in, 0, sizeof(in)); 206 + srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out)); 207 + if (!srq_out) 208 + return -ENOMEM; 227 209 228 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); 229 - in.srqn = cpu_to_be32(srq->srqn); 210 + MLX5_SET(query_srq_in, srq_in, opcode, 211 + MLX5_CMD_OP_QUERY_SRQ); 212 + MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); 213 + err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), 214 + srq_out, 215 + MLX5_ST_SZ_BYTES(query_srq_out)); 216 + if (err) 217 + goto out; 230 218 231 - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), 232 - (u32 *)out, sizeof(*out)); 219 + srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry); 220 + get_srqc(srqc, out); 221 + if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD) 222 + out->flags |= MLX5_SRQ_FLAG_ERR; 223 + out: 224 + kvfree(srq_out); 225 + return err; 233 226 } 234 227 235 228 static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, 236 229 struct mlx5_core_srq *srq, 237 - struct mlx5_create_srq_mbox_in *in, 238 - int srq_inlen) 230 + struct mlx5_srq_attr *in) 239 231 { 240 232 u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; 241 233 void *create_in; 242 - void *srqc; 243 234 void *xrc_srqc; 244 235 void *pas; 245 236 int pas_size; 246 237 int inlen; 247 238 int err; 248 239 249 - srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); 250 - pas_size = get_pas_size(srqc); 240 + pas_size = get_pas_size(in); 251 241 inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; 252 242 create_in = mlx5_vzalloc(inlen); 253 243 if (!create_in) ··· 265 239 xrc_srq_context_entry); 266 240 pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); 267 241 268 - memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); 242 + set_srqc(xrc_srqc, 
in); 243 + MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index); 269 244 memcpy(pas, in->pas, pas_size); 270 245 MLX5_SET(create_xrc_srq_in, create_in, opcode, 271 246 MLX5_CMD_OP_CREATE_XRC_SRQ); ··· 320 293 321 294 static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, 322 295 struct mlx5_core_srq *srq, 323 - struct mlx5_query_srq_mbox_out *out) 296 + struct mlx5_srq_attr *out) 324 297 { 325 298 u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; 326 299 u32 *xrcsrq_out; 327 - void *srqc; 328 300 void *xrc_srqc; 329 301 int err; 330 302 ··· 343 317 344 318 xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, 345 319 xrc_srq_context_entry); 346 - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); 347 - memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); 320 + get_srqc(xrc_srqc, out); 321 + if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD) 322 + out->flags |= MLX5_SRQ_FLAG_ERR; 348 323 349 324 out: 350 325 kvfree(xrcsrq_out); ··· 353 326 } 354 327 355 328 static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 356 - struct mlx5_create_srq_mbox_in *in, int srq_inlen) 329 + struct mlx5_srq_attr *in) 357 330 { 358 331 void *create_in; 359 332 void *rmpc; 360 - void *srqc; 333 + void *wq; 361 334 int pas_size; 362 335 int inlen; 363 336 int err; 364 337 365 - srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); 366 - pas_size = get_pas_size(srqc); 338 + pas_size = get_pas_size(in); 367 339 inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; 368 340 create_in = mlx5_vzalloc(inlen); 369 341 if (!create_in) 370 342 return -ENOMEM; 371 343 372 344 rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); 345 + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); 373 346 347 + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); 348 + set_wq(wq, in); 374 349 memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); 375 - rmpc_srqc_reformat(srqc, rmpc, true); 376 350 377 351 err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); 378 352 ··· 418 390 } 419 391 420 392 static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 421 - struct mlx5_query_srq_mbox_out *out) 393 + struct mlx5_srq_attr *out) 422 394 { 423 395 u32 *rmp_out; 424 396 void *rmpc; 425 - void *srqc; 426 397 int err; 427 398 428 399 rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); ··· 432 405 if (err) 433 406 goto out; 434 407 435 - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); 436 408 rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); 437 - rmpc_srqc_reformat(srqc, rmpc, false); 409 + get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out); 410 + if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY) 411 + out->flags |= MLX5_SRQ_FLAG_ERR; 438 412 439 413 out: 440 414 kvfree(rmp_out); ··· 444 416 445 417 static int create_srq_split(struct mlx5_core_dev *dev, 446 418 struct mlx5_core_srq *srq, 447 - struct mlx5_create_srq_mbox_in *in, 448 - int inlen, int is_xrc) 419 + struct mlx5_srq_attr *in) 449 420 { 450 421 if (!dev->issi) 451 - return create_srq_cmd(dev, srq, in, inlen); 422 + return create_srq_cmd(dev, srq, in); 452 423 else if (srq->common.res == MLX5_RES_XSRQ) 453 - return create_xrc_srq_cmd(dev, srq, in, inlen); 424 + return create_xrc_srq_cmd(dev, srq, in); 454 425 else 455 - return create_rmp_cmd(dev, srq, in, inlen); 426 + return create_rmp_cmd(dev, srq, in); 456 427 } 457 428 458 429 static int destroy_srq_split(struct mlx5_core_dev *dev, ··· 466 439 } 467 440 468 441 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq 
*srq, 469 - struct mlx5_create_srq_mbox_in *in, int inlen, 470 - int is_xrc) 442 + struct mlx5_srq_attr *in) 471 443 { 472 444 int err; 473 445 struct mlx5_srq_table *table = &dev->priv.srq_table; 474 446 475 - srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ; 447 + if (in->type == IB_SRQT_XRC) 448 + srq->common.res = MLX5_RES_XSRQ; 449 + else 450 + srq->common.res = MLX5_RES_SRQ; 476 451 477 - err = create_srq_split(dev, srq, in, inlen, is_xrc); 452 + err = create_srq_split(dev, srq, in); 478 453 if (err) 479 454 return err; 480 455 ··· 531 502 EXPORT_SYMBOL(mlx5_core_destroy_srq); 532 503 533 504 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 534 - struct mlx5_query_srq_mbox_out *out) 505 + struct mlx5_srq_attr *out) 535 506 { 536 507 if (!dev->issi) 537 508 return query_srq_cmd(dev, srq, out);
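All firmware commands in this file now go through the MLX5_ST_SZ_*/MLX5_SET()/MLX5_GET() accessors generated from mlx5_ifc.h rather than hand-packed big-endian mailbox structs; set_srqc()/get_srqc() and set_wq()/get_wq() translate between struct mlx5_srq_attr and the three firmware layouts (srqc, xrc_srqc, rmpc/wq). A stripped-down sketch of the fixed-size command pattern, mirroring destroy_srq_cmd() from the hunk above under an illustrative name:

    static int example_destroy_srq(struct mlx5_core_dev *dev, u32 srqn)
    {
            /* inbox/outbox sizes come straight from the mlx5_ifc.h layouts */
            u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
            u32 out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

            MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
            MLX5_SET(destroy_srq_in, in, srqn, srqn);

            return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                                              out, sizeof(out));
    }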
+4
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
··· 85 85 86 86 return err; 87 87 } 88 + EXPORT_SYMBOL(mlx5_core_create_rq); 88 89 89 90 int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) 90 91 { ··· 111 110 112 111 mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); 113 112 } 113 + EXPORT_SYMBOL(mlx5_core_destroy_rq); 114 114 115 115 int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) 116 116 { ··· 432 430 433 431 return err; 434 432 } 433 + EXPORT_SYMBOL(mlx5_core_create_rqt); 435 434 436 435 int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, 437 436 int inlen) ··· 458 455 459 456 mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); 460 457 } 458 + EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+2
include/linux/mlx5/cq.h
··· 58 58 void (*comp)(struct mlx5_core_cq *); 59 59 void *priv; 60 60 } tasklet_ctx; 61 + int reset_notify_added; 62 + struct list_head reset_notify; 61 63 }; 62 64 63 65
+3 -3
include/linux/mlx5/driver.h
··· 46 46 47 47 #include <linux/mlx5/device.h> 48 48 #include <linux/mlx5/doorbell.h> 49 + #include <linux/mlx5/srq.h> 49 50 50 51 enum { 51 52 MLX5_RQ_BITMASK_VSD = 1 << 1, ··· 773 772 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, 774 773 struct mlx5_cmd_mailbox *head); 775 774 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 776 - struct mlx5_create_srq_mbox_in *in, int inlen, 777 - int is_xrc); 775 + struct mlx5_srq_attr *in); 778 776 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); 779 777 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 780 - struct mlx5_query_srq_mbox_out *out); 778 + struct mlx5_srq_attr *out); 781 779 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 782 780 u16 lwm, int is_srq); 783 781 void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
+263 -12
include/linux/mlx5/mlx5_ifc.h
··· 123 123 MLX5_CMD_OP_DRAIN_DCT = 0x712, 124 124 MLX5_CMD_OP_QUERY_DCT = 0x713, 125 125 MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, 126 + MLX5_CMD_OP_CREATE_XRQ = 0x717, 127 + MLX5_CMD_OP_DESTROY_XRQ = 0x718, 128 + MLX5_CMD_OP_QUERY_XRQ = 0x719, 129 + MLX5_CMD_OP_ARM_XRQ = 0x71a, 126 130 MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, 127 131 MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, 128 132 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, ··· 143 139 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, 144 140 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, 145 141 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, 142 + MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, 143 + MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, 146 144 MLX5_CMD_OP_ALLOC_PD = 0x800, 147 145 MLX5_CMD_OP_DEALLOC_PD = 0x801, 148 146 MLX5_CMD_OP_ALLOC_UAR = 0x802, ··· 368 362 }; 369 363 370 364 struct mlx5_ifc_fte_match_set_misc_bits { 371 - u8 reserved_at_0[0x20]; 365 + u8 reserved_at_0[0x8]; 366 + u8 source_sqn[0x18]; 372 367 373 368 u8 reserved_at_20[0x10]; 374 369 u8 source_port[0x10]; ··· 513 506 u8 nic_vport_port_guid_modify[0x1]; 514 507 515 508 u8 reserved_at_20[0x7e0]; 509 + }; 510 + 511 + struct mlx5_ifc_qos_cap_bits { 512 + u8 packet_pacing[0x1]; 513 + u8 reserved_0[0x1f]; 514 + u8 reserved_1[0x20]; 515 + u8 packet_pacing_max_rate[0x20]; 516 + u8 packet_pacing_min_rate[0x20]; 517 + u8 reserved_2[0x10]; 518 + u8 packet_pacing_rate_table_size[0x10]; 519 + u8 reserved_3[0x760]; 516 520 }; 517 521 518 522 struct mlx5_ifc_per_protocol_networking_offload_caps_bits { ··· 765 747 766 748 u8 out_of_seq_cnt[0x1]; 767 749 u8 vport_counters[0x1]; 768 - u8 reserved_at_182[0x4]; 750 + u8 retransmission_q_counters[0x1]; 751 + u8 reserved_at_183[0x3]; 769 752 u8 max_qp_cnt[0xa]; 770 753 u8 pkey_table_size[0x10]; 771 754 ··· 793 774 u8 log_max_msg[0x5]; 794 775 u8 reserved_at_1c8[0x4]; 795 776 u8 max_tc[0x4]; 796 - u8 reserved_at_1d0[0x6]; 777 + u8 reserved_at_1d0[0x1]; 778 + u8 dcbx[0x1]; 779 + u8 reserved_at_1d2[0x4]; 797 780 u8 rol_s[0x1]; 798 781 u8 rol_g[0x1]; 799 782 u8 reserved_at_1d8[0x1]; ··· 827 806 u8 tph[0x1]; 828 807 u8 rf[0x1]; 829 808 u8 dct[0x1]; 830 - u8 reserved_at_21b[0x1]; 809 + u8 qos[0x1]; 831 810 u8 eth_net_offloads[0x1]; 832 811 u8 roce[0x1]; 833 812 u8 atomic[0x1]; ··· 953 932 u8 cqe_compression_timeout[0x10]; 954 933 u8 cqe_compression_max_num[0x10]; 955 934 956 - u8 reserved_at_5e0[0x220]; 935 + u8 reserved_at_5e0[0x10]; 936 + u8 tag_matching[0x1]; 937 + u8 rndv_offload_rc[0x1]; 938 + u8 rndv_offload_dc[0x1]; 939 + u8 log_tag_matching_list_sz[0x5]; 940 + u8 reserved_at_5e8[0x3]; 941 + u8 log_max_xrq[0x5]; 942 + 943 + u8 reserved_at_5f0[0x200]; 957 944 }; 958 945 959 946 enum mlx5_flow_destination_type { ··· 1999 1970 2000 1971 u8 reserved_at_560[0x5]; 2001 1972 u8 rq_type[0x3]; 2002 - u8 srqn_rmpn[0x18]; 1973 + u8 srqn_rmpn_xrqn[0x18]; 2003 1974 2004 1975 u8 reserved_at_580[0x8]; 2005 1976 u8 rmsn[0x18]; ··· 2050 2021 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; 2051 2022 struct mlx5_ifc_e_switch_cap_bits e_switch_cap; 2052 2023 struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; 2024 + struct mlx5_ifc_qos_cap_bits qos_cap; 2053 2025 u8 reserved_at_0[0x8000]; 2054 2026 }; 2055 2027 ··· 2277 2247 u8 reserved_at_40[0x8]; 2278 2248 u8 cqn[0x18]; 2279 2249 2280 - u8 reserved_at_60[0xa0]; 2250 + u8 reserved_at_60[0x90]; 2281 2251 2252 + u8 packet_pacing_rate_limit_index[0x10]; 2282 2253 u8 tis_lst_sz[0x10]; 2283 2254 u8 reserved_at_110[0x10]; 2284 2255 ··· 2627 2596 u8 reserved_at_98[0x8]; 2628 2597 2629 2598 u8 reserved_at_a0[0x8]; 2630 - u8 
srqn[0x18]; 2599 + u8 srqn_xrqn[0x18]; 2631 2600 2632 2601 u8 reserved_at_c0[0x8]; 2633 2602 u8 pd[0x18]; ··· 2679 2648 enum { 2680 2649 MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, 2681 2650 MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, 2651 + MLX5_CQ_PERIOD_NUM_MODES 2682 2652 }; 2683 2653 2684 2654 struct mlx5_ifc_cqc_bits { ··· 2755 2723 u8 vsd[208][0x8]; 2756 2724 2757 2725 u8 vsd_contd_psid[16][0x8]; 2726 + }; 2727 + 2728 + enum { 2729 + MLX5_XRQC_STATE_GOOD = 0x0, 2730 + MLX5_XRQC_STATE_ERROR = 0x1, 2731 + }; 2732 + 2733 + enum { 2734 + MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0, 2735 + MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1, 2736 + }; 2737 + 2738 + enum { 2739 + MLX5_XRQC_OFFLOAD_RNDV = 0x1, 2740 + }; 2741 + 2742 + struct mlx5_ifc_tag_matching_topology_context_bits { 2743 + u8 log_matching_list_sz[0x4]; 2744 + u8 reserved_at_4[0xc]; 2745 + u8 append_next_index[0x10]; 2746 + 2747 + u8 sw_phase_cnt[0x10]; 2748 + u8 hw_phase_cnt[0x10]; 2749 + 2750 + u8 reserved_at_40[0x40]; 2751 + }; 2752 + 2753 + struct mlx5_ifc_xrqc_bits { 2754 + u8 state[0x4]; 2755 + u8 rlkey[0x1]; 2756 + u8 reserved_at_5[0xf]; 2757 + u8 topology[0x4]; 2758 + u8 reserved_at_18[0x4]; 2759 + u8 offload[0x4]; 2760 + 2761 + u8 reserved_at_20[0x8]; 2762 + u8 user_index[0x18]; 2763 + 2764 + u8 reserved_at_40[0x8]; 2765 + u8 cqn[0x18]; 2766 + 2767 + u8 reserved_at_60[0xa0]; 2768 + 2769 + struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; 2770 + 2771 + u8 reserved_at_180[0x180]; 2772 + 2773 + struct mlx5_ifc_wq_bits wq; 2758 2774 }; 2759 2775 2760 2776 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { ··· 3227 3147 u8 reserved_at_800[0x80]; 3228 3148 }; 3229 3149 3150 + struct mlx5_ifc_query_xrq_out_bits { 3151 + u8 status[0x8]; 3152 + u8 reserved_at_8[0x18]; 3153 + 3154 + u8 syndrome[0x20]; 3155 + 3156 + u8 reserved_at_40[0x40]; 3157 + 3158 + struct mlx5_ifc_xrqc_bits xrq_context; 3159 + }; 3160 + 3161 + struct mlx5_ifc_query_xrq_in_bits { 3162 + u8 opcode[0x10]; 3163 + u8 reserved_at_10[0x10]; 3164 + 3165 + u8 reserved_at_20[0x10]; 3166 + u8 op_mod[0x10]; 3167 + 3168 + u8 reserved_at_40[0x8]; 3169 + u8 xrqn[0x18]; 3170 + 3171 + u8 reserved_at_60[0x20]; 3172 + }; 3173 + 3230 3174 struct mlx5_ifc_query_xrc_srq_out_bits { 3231 3175 u8 status[0x8]; 3232 3176 u8 reserved_at_8[0x18]; ··· 3654 3550 3655 3551 u8 out_of_sequence[0x20]; 3656 3552 3657 - u8 reserved_at_1e0[0x620]; 3553 + u8 reserved_at_1e0[0x20]; 3554 + 3555 + u8 duplicate_request[0x20]; 3556 + 3557 + u8 reserved_at_220[0x20]; 3558 + 3559 + u8 rnr_nak_retry_err[0x20]; 3560 + 3561 + u8 reserved_at_260[0x20]; 3562 + 3563 + u8 packet_seq_err[0x20]; 3564 + 3565 + u8 reserved_at_2a0[0x20]; 3566 + 3567 + u8 implied_nak_seq_err[0x20]; 3568 + 3569 + u8 reserved_at_2e0[0x20]; 3570 + 3571 + u8 local_ack_timeout_err[0x20]; 3572 + 3573 + u8 reserved_at_320[0x4e0]; 3658 3574 }; 3659 3575 3660 3576 struct mlx5_ifc_query_q_counter_in_bits { ··· 5128 5004 u8 multicast_gid[16][0x8]; 5129 5005 }; 5130 5006 5007 + struct mlx5_ifc_destroy_xrq_out_bits { 5008 + u8 status[0x8]; 5009 + u8 reserved_at_8[0x18]; 5010 + 5011 + u8 syndrome[0x20]; 5012 + 5013 + u8 reserved_at_40[0x40]; 5014 + }; 5015 + 5016 + struct mlx5_ifc_destroy_xrq_in_bits { 5017 + u8 opcode[0x10]; 5018 + u8 reserved_at_10[0x10]; 5019 + 5020 + u8 reserved_at_20[0x10]; 5021 + u8 op_mod[0x10]; 5022 + 5023 + u8 reserved_at_40[0x8]; 5024 + u8 xrqn[0x18]; 5025 + 5026 + u8 reserved_at_60[0x20]; 5027 + }; 5028 + 5131 5029 struct mlx5_ifc_destroy_xrc_srq_out_bits { 5132 5030 u8 
status[0x8]; 5133 5031 u8 reserved_at_8[0x18]; ··· 5735 5589 u8 reserved_at_60[0x20]; 5736 5590 }; 5737 5591 5592 + struct mlx5_ifc_create_xrq_out_bits { 5593 + u8 status[0x8]; 5594 + u8 reserved_at_8[0x18]; 5595 + 5596 + u8 syndrome[0x20]; 5597 + 5598 + u8 reserved_at_40[0x8]; 5599 + u8 xrqn[0x18]; 5600 + 5601 + u8 reserved_at_60[0x20]; 5602 + }; 5603 + 5604 + struct mlx5_ifc_create_xrq_in_bits { 5605 + u8 opcode[0x10]; 5606 + u8 reserved_at_10[0x10]; 5607 + 5608 + u8 reserved_at_20[0x10]; 5609 + u8 op_mod[0x10]; 5610 + 5611 + u8 reserved_at_40[0x40]; 5612 + 5613 + struct mlx5_ifc_xrqc_bits xrq_context; 5614 + }; 5615 + 5738 5616 struct mlx5_ifc_create_xrc_srq_out_bits { 5739 5617 u8 status[0x8]; 5740 5618 u8 reserved_at_8[0x18]; ··· 6300 6130 u8 multicast_gid[16][0x8]; 6301 6131 }; 6302 6132 6133 + struct mlx5_ifc_arm_xrq_out_bits { 6134 + u8 status[0x8]; 6135 + u8 reserved_at_8[0x18]; 6136 + 6137 + u8 syndrome[0x20]; 6138 + 6139 + u8 reserved_at_40[0x40]; 6140 + }; 6141 + 6142 + struct mlx5_ifc_arm_xrq_in_bits { 6143 + u8 opcode[0x10]; 6144 + u8 reserved_at_10[0x10]; 6145 + 6146 + u8 reserved_at_20[0x10]; 6147 + u8 op_mod[0x10]; 6148 + 6149 + u8 reserved_at_40[0x8]; 6150 + u8 xrqn[0x18]; 6151 + 6152 + u8 reserved_at_60[0x10]; 6153 + u8 lwm[0x10]; 6154 + }; 6155 + 6303 6156 struct mlx5_ifc_arm_xrc_srq_out_bits { 6304 6157 u8 status[0x8]; 6305 6158 u8 reserved_at_8[0x18]; ··· 6360 6167 }; 6361 6168 6362 6169 enum { 6363 - MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, 6170 + MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, 6171 + MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2, 6364 6172 }; 6365 6173 6366 6174 struct mlx5_ifc_arm_rq_in_bits { ··· 6554 6360 u8 vxlan_udp_port[0x10]; 6555 6361 }; 6556 6362 6363 + struct mlx5_ifc_set_rate_limit_out_bits { 6364 + u8 status[0x8]; 6365 + u8 reserved_at_8[0x18]; 6366 + 6367 + u8 syndrome[0x20]; 6368 + 6369 + u8 reserved_at_40[0x40]; 6370 + }; 6371 + 6372 + struct mlx5_ifc_set_rate_limit_in_bits { 6373 + u8 opcode[0x10]; 6374 + u8 reserved_at_10[0x10]; 6375 + 6376 + u8 reserved_at_20[0x10]; 6377 + u8 op_mod[0x10]; 6378 + 6379 + u8 reserved_at_40[0x10]; 6380 + u8 rate_limit_index[0x10]; 6381 + 6382 + u8 reserved_at_60[0x20]; 6383 + 6384 + u8 rate_limit[0x20]; 6385 + }; 6386 + 6557 6387 struct mlx5_ifc_access_register_out_bits { 6558 6388 u8 status[0x8]; 6559 6389 u8 reserved_at_8[0x18]; ··· 6702 6484 }; 6703 6485 6704 6486 struct mlx5_ifc_ptys_reg_bits { 6705 - u8 reserved_at_0[0x8]; 6487 + u8 an_disable_cap[0x1]; 6488 + u8 an_disable_admin[0x1]; 6489 + u8 reserved_at_2[0x6]; 6706 6490 u8 local_port[0x8]; 6707 6491 u8 reserved_at_10[0xd]; 6708 6492 u8 proto_mask[0x3]; 6709 6493 6710 - u8 reserved_at_20[0x40]; 6494 + u8 an_status[0x4]; 6495 + u8 reserved_at_24[0x3c]; 6711 6496 6712 6497 u8 eth_proto_capability[0x20]; 6713 6498 ··· 7671 7450 u8 dword_11[0x20]; 7672 7451 }; 7673 7452 7453 + struct mlx5_ifc_dcbx_param_bits { 7454 + u8 dcbx_cee_cap[0x1]; 7455 + u8 dcbx_ieee_cap[0x1]; 7456 + u8 dcbx_standby_cap[0x1]; 7457 + u8 reserved_at_0[0x5]; 7458 + u8 port_number[0x8]; 7459 + u8 reserved_at_10[0xa]; 7460 + u8 max_application_table_size[6]; 7461 + u8 reserved_at_20[0x15]; 7462 + u8 version_oper[0x3]; 7463 + u8 reserved_at_38[5]; 7464 + u8 version_admin[0x3]; 7465 + u8 willing_admin[0x1]; 7466 + u8 reserved_at_41[0x3]; 7467 + u8 pfc_cap_oper[0x4]; 7468 + u8 reserved_at_48[0x4]; 7469 + u8 pfc_cap_admin[0x4]; 7470 + u8 reserved_at_50[0x4]; 7471 + u8 num_of_tc_oper[0x4]; 7472 + u8 reserved_at_58[0x4]; 7473 + u8 num_of_tc_admin[0x4]; 7474 + u8 remote_willing[0x1]; 7475 + u8 reserved_at_61[3]; 7476 + 
u8 remote_pfc_cap[4]; 7477 + u8 reserved_at_68[0x14]; 7478 + u8 remote_num_of_tc[0x4]; 7479 + u8 reserved_at_80[0x18]; 7480 + u8 error[0x8]; 7481 + u8 reserved_at_a0[0x160]; 7482 + }; 7674 7483 #endif /* MLX5_IFC_H */
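Editor's note: the mlx5_ifc.h hunk above adds, among other things, the SET_RATE_LIMIT/QUERY_RATE_LIMIT opcodes, the packet-pacing fields of qos_cap_bits, and the new XRQ contexts and commands. As a rough illustration of how the new rate-limit mailbox is meant to be driven, below is a minimal sketch using the existing MLX5_SET/MLX5_ST_SZ_DW helpers and mlx5_cmd_exec(); the function name, caller, and error handling are placeholders, not code from this series.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Sketch only: program one entry of the packet-pacing rate table. */
static int set_rate_limit_sketch(struct mlx5_core_dev *dev,
				 u32 rate, u16 index)
{
	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};

	MLX5_SET(set_rate_limit_in, in, opcode, MLX5_CMD_OP_SET_RATE_LIMIT);
	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
	MLX5_SET(set_rate_limit_in, in, rate_limit, rate); /* units as defined by the device */

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}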
+2 -2
include/linux/mlx5/qp.h
··· 555 555 struct mlx5_modify_qp_mbox_in { 556 556 struct mlx5_inbox_hdr hdr; 557 557 __be32 qpn; 558 - u8 rsvd1[4]; 559 - __be32 optparam; 560 558 u8 rsvd0[4]; 559 + __be32 optparam; 560 + u8 rsvd1[4]; 561 561 struct mlx5_qp_context ctx; 562 562 u8 rsvd2[16]; 563 563 };
+25
include/linux/mlx5/srq.h
··· 35 35 36 36 #include <linux/mlx5/driver.h> 37 37 38 + enum { 39 + MLX5_SRQ_FLAG_ERR = (1 << 0), 40 + MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), 41 + }; 42 + 43 + struct mlx5_srq_attr { 44 + u32 type; 45 + u32 flags; 46 + u32 log_size; 47 + u32 wqe_shift; 48 + u32 log_page_size; 49 + u32 wqe_cnt; 50 + u32 srqn; 51 + u32 xrcd; 52 + u32 page_offset; 53 + u32 cqn; 54 + u32 pd; 55 + u32 lwm; 56 + u32 user_index; 57 + u64 db_record; 58 + u64 *pas; 59 + }; 60 + 61 + struct mlx5_core_dev; 62 + 38 63 void mlx5_init_srq_table(struct mlx5_core_dev *dev); 39 64 void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); 40 65
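Editor's note: include/linux/mlx5/srq.h now exposes mlx5_srq_attr, a single parameter block shared by the SRQ/XRC-SRQ/RMP creation paths. A purely illustrative sketch of filling it is below; the values and the helper that would consume the structure are assumptions, not part of this diff.

#include <linux/string.h>
#include <linux/mlx5/srq.h>

/* Illustrative values only; the real caller derives these from the verbs
 * layer request and the allocated work-queue buffer. */
static void fill_srq_attr_sketch(struct mlx5_srq_attr *in,
				 u32 cqn, u32 pdn, u64 *pas)
{
	memset(in, 0, sizeof(*in));
	in->flags    = MLX5_SRQ_FLAG_WQ_SIG;	/* request WQE signatures */
	in->log_size = 7;			/* log2 of the WQE count */
	in->cqn      = cqn;			/* completion queue number, where applicable */
	in->pd       = pdn;			/* protection domain number */
	in->pas      = pas;			/* page list backing the queue buffer */
}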
+101 -1
include/rdma/ib_verbs.h
··· 562 562 IB_EVENT_QP_LAST_WQE_REACHED, 563 563 IB_EVENT_CLIENT_REREGISTER, 564 564 IB_EVENT_GID_CHANGE, 565 + IB_EVENT_WQ_FATAL, 565 566 }; 566 567 567 568 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); ··· 573 572 struct ib_cq *cq; 574 573 struct ib_qp *qp; 575 574 struct ib_srq *srq; 575 + struct ib_wq *wq; 576 576 u8 port_num; 577 577 } element; 578 578 enum ib_event_type event; ··· 1017 1015 * Only needed for special QP types, or when using the RW API. 1018 1016 */ 1019 1017 u8 port_num; 1018 + struct ib_rwq_ind_table *rwq_ind_tbl; 1020 1019 }; 1021 1020 1022 1021 struct ib_qp_open_attr { ··· 1326 1323 struct list_head ah_list; 1327 1324 struct list_head xrcd_list; 1328 1325 struct list_head rule_list; 1326 + struct list_head wq_list; 1327 + struct list_head rwq_ind_tbl_list; 1329 1328 int closing; 1330 1329 1331 1330 struct pid *tgid; ··· 1433 1428 } ext; 1434 1429 }; 1435 1430 1431 + enum ib_wq_type { 1432 + IB_WQT_RQ 1433 + }; 1434 + 1435 + enum ib_wq_state { 1436 + IB_WQS_RESET, 1437 + IB_WQS_RDY, 1438 + IB_WQS_ERR 1439 + }; 1440 + 1441 + struct ib_wq { 1442 + struct ib_device *device; 1443 + struct ib_uobject *uobject; 1444 + void *wq_context; 1445 + void (*event_handler)(struct ib_event *, void *); 1446 + struct ib_pd *pd; 1447 + struct ib_cq *cq; 1448 + u32 wq_num; 1449 + enum ib_wq_state state; 1450 + enum ib_wq_type wq_type; 1451 + atomic_t usecnt; 1452 + }; 1453 + 1454 + struct ib_wq_init_attr { 1455 + void *wq_context; 1456 + enum ib_wq_type wq_type; 1457 + u32 max_wr; 1458 + u32 max_sge; 1459 + struct ib_cq *cq; 1460 + void (*event_handler)(struct ib_event *, void *); 1461 + }; 1462 + 1463 + enum ib_wq_attr_mask { 1464 + IB_WQ_STATE = 1 << 0, 1465 + IB_WQ_CUR_STATE = 1 << 1, 1466 + }; 1467 + 1468 + struct ib_wq_attr { 1469 + enum ib_wq_state wq_state; 1470 + enum ib_wq_state curr_wq_state; 1471 + }; 1472 + 1473 + struct ib_rwq_ind_table { 1474 + struct ib_device *device; 1475 + struct ib_uobject *uobject; 1476 + atomic_t usecnt; 1477 + u32 ind_tbl_num; 1478 + u32 log_ind_tbl_size; 1479 + struct ib_wq **ind_tbl; 1480 + }; 1481 + 1482 + struct ib_rwq_ind_table_init_attr { 1483 + u32 log_ind_tbl_size; 1484 + /* Each entry is a pointer to Receive Work Queue */ 1485 + struct ib_wq **ind_tbl; 1486 + }; 1487 + 1436 1488 struct ib_qp { 1437 1489 struct ib_device *device; 1438 1490 struct ib_pd *pd; ··· 1512 1450 void *qp_context; 1513 1451 u32 qp_num; 1514 1452 enum ib_qp_type qp_type; 1453 + struct ib_rwq_ind_table *rwq_ind_tbl; 1515 1454 }; 1516 1455 1517 1456 struct ib_mr { ··· 1569 1506 IB_FLOW_SPEC_IB = 0x22, 1570 1507 /* L3 header*/ 1571 1508 IB_FLOW_SPEC_IPV4 = 0x30, 1509 + IB_FLOW_SPEC_IPV6 = 0x31, 1572 1510 /* L4 headers*/ 1573 1511 IB_FLOW_SPEC_TCP = 0x40, 1574 1512 IB_FLOW_SPEC_UDP = 0x41 ··· 1631 1567 struct ib_flow_ipv4_filter mask; 1632 1568 }; 1633 1569 1570 + struct ib_flow_ipv6_filter { 1571 + u8 src_ip[16]; 1572 + u8 dst_ip[16]; 1573 + }; 1574 + 1575 + struct ib_flow_spec_ipv6 { 1576 + enum ib_flow_spec_type type; 1577 + u16 size; 1578 + struct ib_flow_ipv6_filter val; 1579 + struct ib_flow_ipv6_filter mask; 1580 + }; 1581 + 1634 1582 struct ib_flow_tcp_udp_filter { 1635 1583 __be16 dst_port; 1636 1584 __be16 src_port; ··· 1664 1588 struct ib_flow_spec_ib ib; 1665 1589 struct ib_flow_spec_ipv4 ipv4; 1666 1590 struct ib_flow_spec_tcp_udp tcp_udp; 1591 + struct ib_flow_spec_ipv6 ipv6; 1667 1592 }; 1668 1593 1669 1594 struct ib_flow_attr { ··· 1998 1921 struct ifla_vf_stats *stats); 1999 1922 int (*set_vf_guid)(struct ib_device *device, 
int vf, u8 port, u64 guid, 2000 1923 int type); 2001 - 1924 + struct ib_wq * (*create_wq)(struct ib_pd *pd, 1925 + struct ib_wq_init_attr *init_attr, 1926 + struct ib_udata *udata); 1927 + int (*destroy_wq)(struct ib_wq *wq); 1928 + int (*modify_wq)(struct ib_wq *wq, 1929 + struct ib_wq_attr *attr, 1930 + u32 wq_attr_mask, 1931 + struct ib_udata *udata); 1932 + struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 1933 + struct ib_rwq_ind_table_init_attr *init_attr, 1934 + struct ib_udata *udata); 1935 + int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2002 1936 struct ib_dma_mapping_ops *dma_ops; 2003 1937 2004 1938 struct module *owner; ··· 2044 1956 * in fast paths. 2045 1957 */ 2046 1958 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 1959 + void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2047 1960 }; 2048 1961 2049 1962 struct ib_client { ··· 2079 1990 2080 1991 struct ib_device *ib_alloc_device(size_t size); 2081 1992 void ib_dealloc_device(struct ib_device *device); 1993 + 1994 + void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2082 1995 2083 1996 int ib_register_device(struct ib_device *device, 2084 1997 int (*port_callback)(struct ib_device *, ··· 3258 3167 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3259 3168 u16 pkey, const union ib_gid *gid, 3260 3169 const struct sockaddr *addr); 3170 + struct ib_wq *ib_create_wq(struct ib_pd *pd, 3171 + struct ib_wq_init_attr *init_attr); 3172 + int ib_destroy_wq(struct ib_wq *wq); 3173 + int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3174 + u32 wq_attr_mask); 3175 + struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3176 + struct ib_rwq_ind_table_init_attr* 3177 + wq_ind_table_init_attr); 3178 + int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3261 3179 3262 3180 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3263 3181 unsigned int *sg_offset, unsigned int page_size);
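Editor's note: include/rdma/ib_verbs.h introduces the kernel-level work queue (ib_wq) and receive work queue indirection table (ib_rwq_ind_table) objects, the ib_create_wq(), ib_modify_wq(), ib_create_rwq_ind_table() and matching destroy verbs, and the new rwq_ind_tbl field in ib_qp_init_attr. The following sketch shows how a hypothetical kernel consumer could wire several receive WQs into an indirection table for RSS; only verbs declared in this hunk are used, but the surrounding function, queue sizes, and error handling are illustrative.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical consumer: create 2^log_tbl_size receive WQs and group them
 * in an indirection table.  Cleanup on failure is omitted for brevity. */
static struct ib_rwq_ind_table *ulp_setup_rss(struct ib_device *dev,
					      struct ib_pd *pd,
					      struct ib_cq **cqs,
					      struct ib_wq **wqs,
					      u32 log_tbl_size)
{
	struct ib_rwq_ind_table_init_attr tbl_attr = {};
	int i, ret, n = 1 << log_tbl_size;

	for (i = 0; i < n; i++) {
		struct ib_wq_init_attr wq_attr = {
			.wq_type = IB_WQT_RQ,
			.max_wr  = 256,		/* illustrative depth */
			.max_sge = 1,
			.cq      = cqs[i],
		};
		struct ib_wq_attr mod_attr = { .wq_state = IB_WQS_RDY };

		wqs[i] = ib_create_wq(pd, &wq_attr);
		if (IS_ERR(wqs[i]))
			return ERR_CAST(wqs[i]);

		/* Move the WQ out of RESET before posting receives. */
		ret = ib_modify_wq(wqs[i], &mod_attr, IB_WQ_STATE);
		if (ret)
			return ERR_PTR(ret);
	}

	tbl_attr.log_ind_tbl_size = log_tbl_size;
	tbl_attr.ind_tbl = wqs;

	return ib_create_rwq_ind_table(dev, &tbl_attr);
}

The resulting table would then be referenced from ib_qp_init_attr.rwq_ind_tbl (also added in this hunk) when creating an RSS-capable QP.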
+95
include/uapi/rdma/ib_user_verbs.h
··· 95 95 IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP, 96 96 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 97 97 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 98 + IB_USER_VERBS_EX_CMD_CREATE_WQ, 99 + IB_USER_VERBS_EX_CMD_MODIFY_WQ, 100 + IB_USER_VERBS_EX_CMD_DESTROY_WQ, 101 + IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL, 102 + IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL 98 103 }; 99 104 100 105 /* ··· 523 518 __u64 driver_data[0]; 524 519 }; 525 520 521 + enum ib_uverbs_create_qp_mask { 522 + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1UL << 0, 523 + }; 524 + 525 + enum { 526 + IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE, 527 + }; 528 + 526 529 struct ib_uverbs_ex_create_qp { 527 530 __u64 user_handle; 528 531 __u32 pd_handle; ··· 548 535 __u8 reserved; 549 536 __u32 comp_mask; 550 537 __u32 create_flags; 538 + __u32 rwq_ind_tbl_handle; 539 + __u32 reserved1; 551 540 }; 552 541 553 542 struct ib_uverbs_open_qp { ··· 867 852 struct ib_uverbs_flow_tcp_udp_filter mask; 868 853 }; 869 854 855 + struct ib_uverbs_flow_ipv6_filter { 856 + __u8 src_ip[16]; 857 + __u8 dst_ip[16]; 858 + }; 859 + 860 + struct ib_uverbs_flow_spec_ipv6 { 861 + union { 862 + struct ib_uverbs_flow_spec_hdr hdr; 863 + struct { 864 + __u32 type; 865 + __u16 size; 866 + __u16 reserved; 867 + }; 868 + }; 869 + struct ib_uverbs_flow_ipv6_filter val; 870 + struct ib_uverbs_flow_ipv6_filter mask; 871 + }; 872 + 870 873 struct ib_uverbs_flow_attr { 871 874 __u32 type; 872 875 __u16 size; ··· 977 944 978 945 struct ib_uverbs_destroy_srq_resp { 979 946 __u32 events_reported; 947 + }; 948 + 949 + struct ib_uverbs_ex_create_wq { 950 + __u32 comp_mask; 951 + __u32 wq_type; 952 + __u64 user_handle; 953 + __u32 pd_handle; 954 + __u32 cq_handle; 955 + __u32 max_wr; 956 + __u32 max_sge; 957 + }; 958 + 959 + struct ib_uverbs_ex_create_wq_resp { 960 + __u32 comp_mask; 961 + __u32 response_length; 962 + __u32 wq_handle; 963 + __u32 max_wr; 964 + __u32 max_sge; 965 + __u32 wqn; 966 + }; 967 + 968 + struct ib_uverbs_ex_destroy_wq { 969 + __u32 comp_mask; 970 + __u32 wq_handle; 971 + }; 972 + 973 + struct ib_uverbs_ex_destroy_wq_resp { 974 + __u32 comp_mask; 975 + __u32 response_length; 976 + __u32 events_reported; 977 + __u32 reserved; 978 + }; 979 + 980 + struct ib_uverbs_ex_modify_wq { 981 + __u32 attr_mask; 982 + __u32 wq_handle; 983 + __u32 wq_state; 984 + __u32 curr_wq_state; 985 + }; 986 + 987 + /* Prevent memory allocation rather than max expected size */ 988 + #define IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE 0x0d 989 + struct ib_uverbs_ex_create_rwq_ind_table { 990 + __u32 comp_mask; 991 + __u32 log_ind_tbl_size; 992 + /* Following are the wq handles according to log_ind_tbl_size 993 + * wq_handle1 994 + * wq_handle2 995 + */ 996 + __u32 wq_handles[0]; 997 + }; 998 + 999 + struct ib_uverbs_ex_create_rwq_ind_table_resp { 1000 + __u32 comp_mask; 1001 + __u32 response_length; 1002 + __u32 ind_tbl_handle; 1003 + __u32 ind_tbl_num; 1004 + }; 1005 + 1006 + struct ib_uverbs_ex_destroy_rwq_ind_table { 1007 + __u32 comp_mask; 1008 + __u32 ind_tbl_handle; 980 1009 }; 981 1010 982 1011 #endif /* IB_USER_VERBS_H */
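Editor's note: the uapi additions define the extended CREATE/MODIFY/DESTROY_WQ and CREATE/DESTROY_RWQ_IND_TBL commands; the create_rwq_ind_table request carries 2^log_ind_tbl_size WQ handles in its trailing wq_handles[] array. A userspace-side sketch of sizing and filling that payload follows; the extended-command framing around it (and any alignment padding the kernel may additionally require) is handled by libibverbs and is deliberately not modelled here.

#include <stdlib.h>
#include <string.h>
#include <rdma/ib_user_verbs.h>

/* Sketch: allocate and populate the variable-length request body.
 * build_ind_tbl_cmd() is a placeholder name, not a libibverbs API. */
static struct ib_uverbs_ex_create_rwq_ind_table *
build_ind_tbl_cmd(const __u32 *wq_handles, __u32 log_size, size_t *cmd_len)
{
	struct ib_uverbs_ex_create_rwq_ind_table *cmd;
	size_t nhandles = 1UL << log_size;

	*cmd_len = sizeof(*cmd) + nhandles * sizeof(__u32);
	cmd = calloc(1, *cmd_len);	/* comp_mask left at zero */
	if (!cmd)
		return NULL;

	cmd->log_ind_tbl_size = log_size;	/* table holds 2^log_size WQs */
	memcpy(cmd->wq_handles, wq_handles, nhandles * sizeof(__u32));
	return cmd;
}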