Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tags 'for-linus' and 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma updates from Doug Ledford:
"As mentioned in my first pull request, these are the subsequent pull
requests I had. This is all I have, and in fact this cleans out the
RDMA subsystem's entire patchworks queue of kernel changes that are
ready to go (well, it did for the weekend anyway, a few new patches
are in, but they'll be coming during the -rc cycle).

The first tag contains a single patch that would have conflicted if
taken from my tree or DaveM's tree as it needed our trees merged to
come cleanly.

The second tag contains the patch series from Intel plus three other
stragglers that came in late last week. I took them because it allowed
me to legitimately claim that the RDMA patchworks queue was, for a
short time, 100% cleared of all waiting kernel patches, woohoo! :-).

I have it under my for-next tag, so it did get 0day and linux-next
over the end of last week, and linux-next did show one minor conflict.

Summary:

'for-linus' tag:
- mlx5/IPoIB fixup patch

'for-next' tag:
- the hfi1 15 patch set that landed late
- IPoIB get_link_ksettings which landed late because I asked for a
respin
- one late rxe change
- one -rc worthy fix that's in early"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
IB/mlx5: Enable IPoIB acceleration

* tag 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
rxe: expose num_possible_cpus() cnum_comp_vectors
IB/rxe: Update caller's CRC for RXE_MEM_TYPE_DMA memory type
IB/hfi1: Clean up on context initialization failure
IB/hfi1: Fix an assign/ordering issue with shared context IDs
IB/hfi1: Clean up context initialization
IB/hfi1: Correctly clear the pkey
IB/hfi1: Search shared contexts on the opened device, not all devices
IB/hfi1: Remove atomic operations for SDMA_REQ_HAVE_AHG bit
IB/hfi1: Use filedata rather than filepointer
IB/hfi1: Name function prototype parameters
IB/hfi1: Fix a subcontext memory leak
IB/hfi1: Return an error on memory allocation failure
IB/hfi1: Adjust default eager_buffer_size to 8MB
IB/hfi1: Get rid of divide when setting the tx request header
IB/hfi1: Fix yield logic in send engine
IB/hfi1, IB/rdmavt: Move r_adefered to r_lock cache line
IB/hfi1: Fix checks for Offline transient state
IB/ipoib: add get_link_ksettings in ethtool

+773 -632
+20 -27
drivers/infiniband/hw/hfi1/chip.c
··· 1055 1055 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1056 1056 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1057 1057 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg); 1058 - static void set_partition_keys(struct hfi1_pportdata *); 1058 + static void set_partition_keys(struct hfi1_pportdata *ppd); 1059 1059 static const char *link_state_name(u32 state); 1060 1060 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, 1061 1061 u32 state); ··· 1068 1068 int msecs); 1069 1069 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc); 1070 1070 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); 1071 - static void handle_temp_err(struct hfi1_devdata *); 1072 - static void dc_shutdown(struct hfi1_devdata *); 1073 - static void dc_start(struct hfi1_devdata *); 1071 + static void handle_temp_err(struct hfi1_devdata *dd); 1072 + static void dc_shutdown(struct hfi1_devdata *dd); 1073 + static void dc_start(struct hfi1_devdata *dd); 1074 1074 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, 1075 1075 unsigned int *np); 1076 1076 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); ··· 10233 10233 if (pstate == PLS_OFFLINE) { 10234 10234 do_transition = 0; /* in right state */ 10235 10235 do_wait = 0; /* ...no need to wait */ 10236 - } else if ((pstate & 0xff) == PLS_OFFLINE) { 10236 + } else if ((pstate & 0xf0) == PLS_OFFLINE) { 10237 10237 do_transition = 0; /* in an offline transient state */ 10238 10238 do_wait = 1; /* ...wait for it to settle */ 10239 10239 } else { ··· 12662 12662 #define SET_STATIC_RATE_CONTROL_SMASK(r) \ 12663 12663 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 12664 12664 12665 - int hfi1_init_ctxt(struct send_context *sc) 12665 + void hfi1_init_ctxt(struct send_context *sc) 12666 12666 { 12667 12667 if (sc) { 12668 12668 struct hfi1_devdata *dd = sc->dd; 
··· 12679 12679 write_kctxt_csr(dd, sc->hw_context, 12680 12680 SEND_CTXT_CHECK_ENABLE, reg); 12681 12681 } 12682 - return 0; 12683 12682 } 12684 12683 12685 12684 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) ··· 14527 14528 return ret; 14528 14529 } 14529 14530 14530 - int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt) 14531 + int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) 14531 14532 { 14532 - struct hfi1_ctxtdata *rcd; 14533 - unsigned sctxt; 14534 - int ret = 0; 14533 + u8 hw_ctxt; 14535 14534 u64 reg; 14536 14535 14537 - if (ctxt < dd->num_rcv_contexts) { 14538 - rcd = dd->rcd[ctxt]; 14539 - } else { 14540 - ret = -EINVAL; 14541 - goto done; 14542 - } 14543 - if (!rcd || !rcd->sc) { 14544 - ret = -EINVAL; 14545 - goto done; 14546 - } 14547 - sctxt = rcd->sc->hw_context; 14548 - reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE); 14536 + if (!ctxt || !ctxt->sc) 14537 + return -EINVAL; 14538 + 14539 + if (ctxt->ctxt >= dd->num_rcv_contexts) 14540 + return -EINVAL; 14541 + 14542 + hw_ctxt = ctxt->sc->hw_context; 14543 + reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14549 14544 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; 14550 - write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg); 14551 - write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); 14552 - done: 14553 - return ret; 14545 + write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14546 + write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); 14547 + 14548 + return 0; 14554 14549 } 14555 14550 14556 14551 /*
+6 -4
drivers/infiniband/hw/hfi1/chip.h
··· 636 636 write_csr(dd, offset0 + (0x1000 * ctxt), value); 637 637 } 638 638 639 - u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32); 639 + u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, 640 + u32 dw_len); 640 641 641 642 /* firmware.c */ 642 643 #define SBUS_MASTER_BROADCAST 0xfd ··· 729 728 void set_intr_state(struct hfi1_devdata *dd, u32 enable); 730 729 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, 731 730 int refresh_widths); 732 - void update_usrhead(struct hfi1_ctxtdata *, u32, u32, u32, u32, u32); 731 + void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, 732 + u32 intr_adjust, u32 npkts); 733 733 int stop_drain_data_vls(struct hfi1_devdata *dd); 734 734 int open_fill_data_vls(struct hfi1_devdata *dd); 735 735 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns); ··· 1349 1347 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd); 1350 1348 struct ib_header *hfi1_get_msgheader( 1351 1349 struct hfi1_devdata *dd, __le32 *rhf_addr); 1352 - int hfi1_init_ctxt(struct send_context *sc); 1350 + void hfi1_init_ctxt(struct send_context *sc); 1353 1351 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, 1354 1352 u32 type, unsigned long pa, u16 order); 1355 1353 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd); ··· 1362 1360 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey); 1363 1361 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt); 1364 1362 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey); 1365 - int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt); 1363 + int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt); 1366 1364 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality); 1367 1365 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd); 1368 1366 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
+3 -39
drivers/infiniband/hw/hfi1/driver.c
··· 85 85 MODULE_PARM_DESC(cu, "Credit return units"); 86 86 87 87 unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT; 88 - static int hfi1_caps_set(const char *, const struct kernel_param *); 89 - static int hfi1_caps_get(char *, const struct kernel_param *); 88 + static int hfi1_caps_set(const char *val, const struct kernel_param *kp); 89 + static int hfi1_caps_get(char *buffer, const struct kernel_param *kp); 90 90 static const struct kernel_param_ops cap_ops = { 91 91 .set = hfi1_caps_set, 92 92 .get = hfi1_caps_get ··· 208 208 } 209 209 spin_unlock_irqrestore(&hfi1_devs_lock, flags); 210 210 return nunits_active; 211 - } 212 - 213 - /* 214 - * Return count of all units, optionally return in arguments 215 - * the number of usable (present) units, and the number of 216 - * ports that are up. 217 - */ 218 - int hfi1_count_units(int *npresentp, int *nupp) 219 - { 220 - int nunits = 0, npresent = 0, nup = 0; 221 - struct hfi1_devdata *dd; 222 - unsigned long flags; 223 - int pidx; 224 - struct hfi1_pportdata *ppd; 225 - 226 - spin_lock_irqsave(&hfi1_devs_lock, flags); 227 - 228 - list_for_each_entry(dd, &hfi1_dev_list, list) { 229 - nunits++; 230 - if ((dd->flags & HFI1_PRESENT) && dd->kregbase) 231 - npresent++; 232 - for (pidx = 0; pidx < dd->num_pports; ++pidx) { 233 - ppd = dd->pport + pidx; 234 - if (ppd->lid && ppd->linkup) 235 - nup++; 236 - } 237 - } 238 - 239 - spin_unlock_irqrestore(&hfi1_devs_lock, flags); 240 - 241 - if (npresentp) 242 - *npresentp = npresent; 243 - if (nupp) 244 - *nupp = nup; 245 - 246 - return nunits; 247 211 } 248 212 249 213 /* ··· 1289 1325 if (dd->rcd) 1290 1326 for (i = dd->first_dyn_alloc_ctxt; 1291 1327 i < dd->num_rcv_contexts; i++) { 1292 - if (!dd->rcd[i] || !dd->rcd[i]->cnt) 1328 + if (!dd->rcd[i]) 1293 1329 continue; 1294 1330 spin_unlock_irqrestore(&dd->uctxt_lock, flags); 1295 1331 ret = -EBUSY;
+225 -218
drivers/infiniband/hw/hfi1/file_ops.c
··· 49 49 #include <linux/vmalloc.h> 50 50 #include <linux/io.h> 51 51 #include <linux/sched/mm.h> 52 + #include <linux/bitmap.h> 52 53 53 54 #include <rdma/ib.h> 54 55 ··· 71 70 /* 72 71 * File operation functions 73 72 */ 74 - static int hfi1_file_open(struct inode *, struct file *); 75 - static int hfi1_file_close(struct inode *, struct file *); 76 - static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *); 77 - static unsigned int hfi1_poll(struct file *, struct poll_table_struct *); 78 - static int hfi1_file_mmap(struct file *, struct vm_area_struct *); 73 + static int hfi1_file_open(struct inode *inode, struct file *fp); 74 + static int hfi1_file_close(struct inode *inode, struct file *fp); 75 + static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from); 76 + static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt); 77 + static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma); 79 78 80 - static u64 kvirt_to_phys(void *); 81 - static int assign_ctxt(struct file *, struct hfi1_user_info *); 82 - static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *); 83 - static int user_init(struct file *); 84 - static int get_ctxt_info(struct file *, void __user *, __u32); 85 - static int get_base_info(struct file *, void __user *, __u32); 86 - static int setup_ctxt(struct file *); 87 - static int setup_subctxt(struct hfi1_ctxtdata *); 88 - static int get_user_context(struct file *, struct hfi1_user_info *, int); 89 - static int find_shared_ctxt(struct file *, const struct hfi1_user_info *); 90 - static int allocate_ctxt(struct file *, struct hfi1_devdata *, 91 - struct hfi1_user_info *); 92 - static unsigned int poll_urgent(struct file *, struct poll_table_struct *); 93 - static unsigned int poll_next(struct file *, struct poll_table_struct *); 94 - static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); 95 - static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); 96 
- static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); 97 - static int vma_fault(struct vm_fault *); 79 + static u64 kvirt_to_phys(void *addr); 80 + static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo); 81 + static int init_subctxts(struct hfi1_ctxtdata *uctxt, 82 + const struct hfi1_user_info *uinfo); 83 + static int init_user_ctxt(struct hfi1_filedata *fd); 84 + static void user_init(struct hfi1_ctxtdata *uctxt); 85 + static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase, 86 + __u32 len); 87 + static int get_base_info(struct hfi1_filedata *fd, void __user *ubase, 88 + __u32 len); 89 + static int setup_base_ctxt(struct hfi1_filedata *fd); 90 + static int setup_subctxt(struct hfi1_ctxtdata *uctxt); 91 + 92 + static int find_sub_ctxt(struct hfi1_filedata *fd, 93 + const struct hfi1_user_info *uinfo); 94 + static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, 95 + struct hfi1_user_info *uinfo); 96 + static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt); 97 + static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt); 98 + static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt, 99 + unsigned long events); 100 + static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey); 101 + static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, 102 + int start_stop); 103 + static int vma_fault(struct vm_fault *vmf); 98 104 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, 99 105 unsigned long arg); 100 106 ··· 181 173 struct hfi1_devdata, 182 174 user_cdev); 183 175 176 + if (!((dd->flags & HFI1_PRESENT) && dd->kregbase)) 177 + return -EINVAL; 178 + 184 179 if (!atomic_inc_not_zero(&dd->user_refcount)) 185 180 return -ENXIO; 186 181 ··· 198 187 fd->rec_cpu_num = -1; /* no cpu affinity by default */ 199 188 fd->mm = current->mm; 200 189 mmgrab(fd->mm); 190 + fd->dd = dd; 201 191 fp->private_data = fd; 202 192 } else 
{ 203 193 fp->private_data = NULL; ··· 241 229 sizeof(uinfo))) 242 230 return -EFAULT; 243 231 244 - ret = assign_ctxt(fp, &uinfo); 245 - if (ret < 0) 246 - return ret; 247 - ret = setup_ctxt(fp); 248 - if (ret) 249 - return ret; 250 - ret = user_init(fp); 232 + ret = assign_ctxt(fd, &uinfo); 251 233 break; 252 234 case HFI1_IOCTL_CTXT_INFO: 253 - ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg, 235 + ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg, 254 236 sizeof(struct hfi1_ctxt_info)); 255 237 break; 256 238 case HFI1_IOCTL_USER_INFO: 257 - ret = get_base_info(fp, (void __user *)(unsigned long)arg, 239 + ret = get_base_info(fd, (void __user *)(unsigned long)arg, 258 240 sizeof(struct hfi1_base_info)); 259 241 break; 260 242 case HFI1_IOCTL_CREDIT_UPD: ··· 262 256 sizeof(tinfo))) 263 257 return -EFAULT; 264 258 265 - ret = hfi1_user_exp_rcv_setup(fp, &tinfo); 259 + ret = hfi1_user_exp_rcv_setup(fd, &tinfo); 266 260 if (!ret) { 267 261 /* 268 262 * Copy the number of tidlist entries we used ··· 284 278 sizeof(tinfo))) 285 279 return -EFAULT; 286 280 287 - ret = hfi1_user_exp_rcv_clear(fp, &tinfo); 281 + ret = hfi1_user_exp_rcv_clear(fd, &tinfo); 288 282 if (ret) 289 283 break; 290 284 addr = arg + offsetof(struct hfi1_tid_info, tidcnt); ··· 299 293 sizeof(tinfo))) 300 294 return -EFAULT; 301 295 302 - ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); 296 + ret = hfi1_user_exp_rcv_invalid(fd, &tinfo); 303 297 if (ret) 304 298 break; 305 299 addr = arg + offsetof(struct hfi1_tid_info, tidcnt); ··· 436 430 unsigned long count = 0; 437 431 438 432 ret = hfi1_user_sdma_process_request( 439 - kiocb->ki_filp, (struct iovec *)(from->iov + done), 433 + fd, (struct iovec *)(from->iov + done), 440 434 dim, &count); 441 435 if (ret) { 442 436 reqs = ret; ··· 762 756 /* release the cpu */ 763 757 hfi1_put_proc_affinity(fdata->rec_cpu_num); 764 758 759 + /* clean up rcv side */ 760 + hfi1_user_exp_rcv_free(fdata); 761 + 765 762 /* 766 763 * Clear any left over, 
unhandled events so the next process that 767 764 * gets this context doesn't get confused. ··· 773 764 HFI1_MAX_SHARED_CTXTS) + fdata->subctxt; 774 765 *ev = 0; 775 766 776 - if (--uctxt->cnt) { 777 - uctxt->active_slaves &= ~(1 << fdata->subctxt); 767 + __clear_bit(fdata->subctxt, uctxt->in_use_ctxts); 768 + if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { 778 769 mutex_unlock(&hfi1_mutex); 779 770 goto done; 780 771 } ··· 804 795 805 796 dd->rcd[uctxt->ctxt] = NULL; 806 797 807 - hfi1_user_exp_rcv_free(fdata); 808 - hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); 798 + hfi1_user_exp_rcv_grp_free(uctxt); 799 + hfi1_clear_ctxt_pkey(dd, uctxt); 809 800 810 801 uctxt->rcvwait_to = 0; 811 802 uctxt->piowait_to = 0; ··· 845 836 return paddr; 846 837 } 847 838 848 - static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) 839 + static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo) 849 840 { 850 - int i_minor, ret = 0; 841 + int ret; 851 842 unsigned int swmajor, swminor; 852 843 853 844 swmajor = uinfo->userversion >> 16; 854 - if (swmajor != HFI1_USER_SWMAJOR) { 855 - ret = -ENODEV; 856 - goto done; 857 - } 845 + if (swmajor != HFI1_USER_SWMAJOR) 846 + return -ENODEV; 858 847 859 848 swminor = uinfo->userversion & 0xffff; 860 849 861 850 mutex_lock(&hfi1_mutex); 862 - /* First, lets check if we need to setup a shared context? */ 851 + /* 852 + * Get a sub context if necessary. 
853 + * ret < 0 error, 0 no context, 1 sub-context found 854 + */ 855 + ret = 0; 863 856 if (uinfo->subctxt_cnt) { 864 - struct hfi1_filedata *fd = fp->private_data; 865 - 866 - ret = find_shared_ctxt(fp, uinfo); 867 - if (ret < 0) 868 - goto done_unlock; 869 - if (ret) { 857 + ret = find_sub_ctxt(fd, uinfo); 858 + if (ret > 0) 870 859 fd->rec_cpu_num = 871 860 hfi1_get_proc_affinity(fd->uctxt->numa_id); 872 - } 873 861 } 874 862 875 863 /* 876 - * We execute the following block if we couldn't find a 877 - * shared context or if context sharing is not required. 864 + * Allocate a base context if context sharing is not required or we 865 + * couldn't find a sub context. 878 866 */ 879 - if (!ret) { 880 - i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; 881 - ret = get_user_context(fp, uinfo, i_minor); 882 - } 883 - done_unlock: 867 + if (!ret) 868 + ret = allocate_ctxt(fd, fd->dd, uinfo); 869 + 884 870 mutex_unlock(&hfi1_mutex); 885 - done: 886 - return ret; 887 - } 888 871 889 - static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo, 890 - int devno) 891 - { 892 - struct hfi1_devdata *dd = NULL; 893 - int devmax, npresent, nup; 872 + /* Depending on the context type, do the appropriate init */ 873 + if (ret > 0) { 874 + /* 875 + * sub-context info can only be set up after the base 876 + * context has been completed. 
877 + */ 878 + ret = wait_event_interruptible(fd->uctxt->wait, !test_bit( 879 + HFI1_CTXT_BASE_UNINIT, 880 + &fd->uctxt->event_flags)); 881 + if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) { 882 + clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); 883 + return -ENOMEM; 884 + } 885 + /* The only thing a sub context needs is the user_xxx stuff */ 886 + if (!ret) 887 + ret = init_user_ctxt(fd); 894 888 895 - devmax = hfi1_count_units(&npresent, &nup); 896 - if (!npresent) 897 - return -ENXIO; 898 - 899 - if (!nup) 900 - return -ENETDOWN; 901 - 902 - dd = hfi1_lookup(devno); 903 - if (!dd) 904 - return -ENODEV; 905 - else if (!dd->freectxts) 906 - return -EBUSY; 907 - 908 - return allocate_ctxt(fp, dd, uinfo); 909 - } 910 - 911 - static int find_shared_ctxt(struct file *fp, 912 - const struct hfi1_user_info *uinfo) 913 - { 914 - int devmax, ndev, i; 915 - int ret = 0; 916 - struct hfi1_filedata *fd = fp->private_data; 917 - 918 - devmax = hfi1_count_units(NULL, NULL); 919 - 920 - for (ndev = 0; ndev < devmax; ndev++) { 921 - struct hfi1_devdata *dd = hfi1_lookup(ndev); 922 - 923 - if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase)) 924 - continue; 925 - for (i = dd->first_dyn_alloc_ctxt; 926 - i < dd->num_rcv_contexts; i++) { 927 - struct hfi1_ctxtdata *uctxt = dd->rcd[i]; 928 - 929 - /* Skip ctxts which are not yet open */ 930 - if (!uctxt || !uctxt->cnt) 931 - continue; 932 - 933 - /* Skip dynamically allocted kernel contexts */ 934 - if (uctxt->sc && (uctxt->sc->type == SC_KERNEL)) 935 - continue; 936 - 937 - /* Skip ctxt if it doesn't match the requested one */ 938 - if (memcmp(uctxt->uuid, uinfo->uuid, 939 - sizeof(uctxt->uuid)) || 940 - uctxt->jkey != generate_jkey(current_uid()) || 941 - uctxt->subctxt_id != uinfo->subctxt_id || 942 - uctxt->subctxt_cnt != uinfo->subctxt_cnt) 943 - continue; 944 - 945 - /* Verify the sharing process matches the master */ 946 - if (uctxt->userversion != uinfo->userversion || 947 - uctxt->cnt >= 
uctxt->subctxt_cnt) { 948 - ret = -EINVAL; 949 - goto done; 950 - } 951 - fd->uctxt = uctxt; 952 - fd->subctxt = uctxt->cnt++; 953 - uctxt->active_slaves |= 1 << fd->subctxt; 954 - ret = 1; 955 - goto done; 889 + if (ret) 890 + clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); 891 + } else if (!ret) { 892 + ret = setup_base_ctxt(fd); 893 + if (fd->uctxt->subctxt_cnt) { 894 + /* If there is an error, set the failed bit. */ 895 + if (ret) 896 + set_bit(HFI1_CTXT_BASE_FAILED, 897 + &fd->uctxt->event_flags); 898 + /* 899 + * Base context is done, notify anybody using a 900 + * sub-context that is waiting for this completion 901 + */ 902 + clear_bit(HFI1_CTXT_BASE_UNINIT, 903 + &fd->uctxt->event_flags); 904 + wake_up(&fd->uctxt->wait); 956 905 } 957 906 } 958 907 959 - done: 960 908 return ret; 961 909 } 962 910 963 - static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd, 911 + /* 912 + * The hfi1_mutex must be held when this function is called. It is 913 + * necessary to ensure serialized access to the bitmask in_use_ctxts. 
914 + */ 915 + static int find_sub_ctxt(struct hfi1_filedata *fd, 916 + const struct hfi1_user_info *uinfo) 917 + { 918 + int i; 919 + struct hfi1_devdata *dd = fd->dd; 920 + u16 subctxt; 921 + 922 + for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) { 923 + struct hfi1_ctxtdata *uctxt = dd->rcd[i]; 924 + 925 + /* Skip ctxts which are not yet open */ 926 + if (!uctxt || 927 + bitmap_empty(uctxt->in_use_ctxts, 928 + HFI1_MAX_SHARED_CTXTS)) 929 + continue; 930 + 931 + /* Skip dynamically allocted kernel contexts */ 932 + if (uctxt->sc && (uctxt->sc->type == SC_KERNEL)) 933 + continue; 934 + 935 + /* Skip ctxt if it doesn't match the requested one */ 936 + if (memcmp(uctxt->uuid, uinfo->uuid, 937 + sizeof(uctxt->uuid)) || 938 + uctxt->jkey != generate_jkey(current_uid()) || 939 + uctxt->subctxt_id != uinfo->subctxt_id || 940 + uctxt->subctxt_cnt != uinfo->subctxt_cnt) 941 + continue; 942 + 943 + /* Verify the sharing process matches the master */ 944 + if (uctxt->userversion != uinfo->userversion) 945 + return -EINVAL; 946 + 947 + /* Find an unused context */ 948 + subctxt = find_first_zero_bit(uctxt->in_use_ctxts, 949 + HFI1_MAX_SHARED_CTXTS); 950 + if (subctxt >= uctxt->subctxt_cnt) 951 + return -EBUSY; 952 + 953 + fd->uctxt = uctxt; 954 + fd->subctxt = subctxt; 955 + __set_bit(fd->subctxt, uctxt->in_use_ctxts); 956 + 957 + return 1; 958 + } 959 + 960 + return 0; 961 + } 962 + 963 + static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, 964 964 struct hfi1_user_info *uinfo) 965 965 { 966 - struct hfi1_filedata *fd = fp->private_data; 967 966 struct hfi1_ctxtdata *uctxt; 968 - unsigned ctxt; 967 + unsigned int ctxt; 969 968 int ret, numa; 970 969 971 970 if (dd->flags & HFI1_FROZEN) { ··· 986 969 */ 987 970 return -EIO; 988 971 } 972 + 973 + /* 974 + * This check is sort of redundant to the next EBUSY error. 
It would 975 + * also indicate an inconsistancy in the driver if this value was 976 + * zero, but there were still contexts available. 977 + */ 978 + if (!dd->freectxts) 979 + return -EBUSY; 989 980 990 981 for (ctxt = dd->first_dyn_alloc_ctxt; 991 982 ctxt < dd->num_rcv_contexts; ctxt++) ··· 1038 1013 goto ctxdata_free; 1039 1014 1040 1015 /* 1041 - * Setup shared context resources if the user-level has requested 1042 - * shared contexts and this is the 'master' process. 1016 + * Setup sub context resources if the user-level has requested 1017 + * sub contexts. 1043 1018 * This has to be done here so the rest of the sub-contexts find the 1044 1019 * proper master. 1045 1020 */ 1046 - if (uinfo->subctxt_cnt && !fd->subctxt) { 1021 + if (uinfo->subctxt_cnt) { 1047 1022 ret = init_subctxts(uctxt, uinfo); 1048 1023 /* 1049 1024 * On error, we don't need to disable and de-allocate the ··· 1080 1055 static int init_subctxts(struct hfi1_ctxtdata *uctxt, 1081 1056 const struct hfi1_user_info *uinfo) 1082 1057 { 1083 - unsigned num_subctxts; 1058 + u16 num_subctxts; 1084 1059 1085 1060 num_subctxts = uinfo->subctxt_cnt; 1086 1061 if (num_subctxts > HFI1_MAX_SHARED_CTXTS) ··· 1088 1063 1089 1064 uctxt->subctxt_cnt = uinfo->subctxt_cnt; 1090 1065 uctxt->subctxt_id = uinfo->subctxt_id; 1091 - uctxt->active_slaves = 1; 1092 1066 uctxt->redirect_seq_cnt = 1; 1093 - set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags); 1067 + set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); 1094 1068 1095 1069 return 0; 1096 1070 } ··· 1097 1073 static int setup_subctxt(struct hfi1_ctxtdata *uctxt) 1098 1074 { 1099 1075 int ret = 0; 1100 - unsigned num_subctxts = uctxt->subctxt_cnt; 1076 + u16 num_subctxts = uctxt->subctxt_cnt; 1101 1077 1102 1078 uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE); 1103 - if (!uctxt->subctxt_uregbase) { 1104 - ret = -ENOMEM; 1105 - goto bail; 1106 - } 1079 + if (!uctxt->subctxt_uregbase) 1080 + return -ENOMEM; 1081 + 1107 1082 /* We can take the size of the 
RcvHdr Queue from the master */ 1108 1083 uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size * 1109 1084 num_subctxts); ··· 1117 1094 ret = -ENOMEM; 1118 1095 goto bail_rhdr; 1119 1096 } 1120 - goto bail; 1097 + 1098 + return 0; 1099 + 1121 1100 bail_rhdr: 1122 1101 vfree(uctxt->subctxt_rcvhdr_base); 1102 + uctxt->subctxt_rcvhdr_base = NULL; 1123 1103 bail_ureg: 1124 1104 vfree(uctxt->subctxt_uregbase); 1125 1105 uctxt->subctxt_uregbase = NULL; 1126 - bail: 1106 + 1127 1107 return ret; 1128 1108 } 1129 1109 1130 - static int user_init(struct file *fp) 1110 + static void user_init(struct hfi1_ctxtdata *uctxt) 1131 1111 { 1132 1112 unsigned int rcvctrl_ops = 0; 1133 - struct hfi1_filedata *fd = fp->private_data; 1134 - struct hfi1_ctxtdata *uctxt = fd->uctxt; 1135 - 1136 - /* make sure that the context has already been setup */ 1137 - if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) 1138 - return -EFAULT; 1139 1113 1140 1114 /* initialize poll variables... */ 1141 1115 uctxt->urgent = 0; ··· 1180 1160 else 1181 1161 rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS; 1182 1162 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt); 1183 - 1184 - /* Notify any waiting slaves */ 1185 - if (uctxt->subctxt_cnt) { 1186 - clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags); 1187 - wake_up(&uctxt->wait); 1188 - } 1189 - 1190 - return 0; 1191 1163 } 1192 1164 1193 - static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) 1165 + static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase, 1166 + __u32 len) 1194 1167 { 1195 1168 struct hfi1_ctxt_info cinfo; 1196 - struct hfi1_filedata *fd = fp->private_data; 1197 1169 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1198 1170 int ret = 0; 1199 1171 ··· 1223 1211 return ret; 1224 1212 } 1225 1213 1226 - static int setup_ctxt(struct file *fp) 1214 + static int init_user_ctxt(struct hfi1_filedata *fd) 1227 1215 { 1228 - struct hfi1_filedata *fd = fp->private_data; 1216 + struct hfi1_ctxtdata *uctxt = 
fd->uctxt; 1217 + int ret; 1218 + 1219 + ret = hfi1_user_sdma_alloc_queues(uctxt, fd); 1220 + if (ret) 1221 + return ret; 1222 + 1223 + ret = hfi1_user_exp_rcv_init(fd); 1224 + 1225 + return ret; 1226 + } 1227 + 1228 + static int setup_base_ctxt(struct hfi1_filedata *fd) 1229 + { 1229 1230 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1230 1231 struct hfi1_devdata *dd = uctxt->dd; 1231 1232 int ret = 0; 1232 1233 1233 - /* 1234 - * Context should be set up only once, including allocation and 1235 - * programming of eager buffers. This is done if context sharing 1236 - * is not requested or by the master process. 1237 - */ 1238 - if (!uctxt->subctxt_cnt || !fd->subctxt) { 1239 - ret = hfi1_init_ctxt(uctxt->sc); 1240 - if (ret) 1241 - goto done; 1234 + hfi1_init_ctxt(uctxt->sc); 1242 1235 1243 - /* Now allocate the RcvHdr queue and eager buffers. */ 1244 - ret = hfi1_create_rcvhdrq(dd, uctxt); 1245 - if (ret) 1246 - goto done; 1247 - ret = hfi1_setup_eagerbufs(uctxt); 1248 - if (ret) 1249 - goto done; 1250 - if (uctxt->subctxt_cnt && !fd->subctxt) { 1251 - ret = setup_subctxt(uctxt); 1252 - if (ret) 1253 - goto done; 1254 - } 1255 - } else { 1256 - ret = wait_event_interruptible(uctxt->wait, !test_bit( 1257 - HFI1_CTXT_MASTER_UNINIT, 1258 - &uctxt->event_flags)); 1259 - if (ret) 1260 - goto done; 1261 - } 1262 - 1263 - ret = hfi1_user_sdma_alloc_queues(uctxt, fp); 1236 + /* Now allocate the RcvHdr queue and eager buffers. */ 1237 + ret = hfi1_create_rcvhdrq(dd, uctxt); 1264 1238 if (ret) 1265 - goto done; 1266 - /* 1267 - * Expected receive has to be setup for all processes (including 1268 - * shared contexts). However, it has to be done after the master 1269 - * context has been fully configured as it depends on the 1270 - * eager/expected split of the RcvArray entries. 1271 - * Setting it up here ensures that the subcontexts will be waiting 1272 - * (due to the above wait_event_interruptible() until the master 1273 - * is setup. 
1274 - */ 1275 - ret = hfi1_user_exp_rcv_init(fp); 1276 - if (ret) 1277 - goto done; 1239 + return ret; 1278 1240 1279 - set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); 1280 - done: 1241 + ret = hfi1_setup_eagerbufs(uctxt); 1242 + if (ret) 1243 + goto setup_failed; 1244 + 1245 + /* If sub-contexts are enabled, do the appropriate setup */ 1246 + if (uctxt->subctxt_cnt) 1247 + ret = setup_subctxt(uctxt); 1248 + if (ret) 1249 + goto setup_failed; 1250 + 1251 + ret = hfi1_user_exp_rcv_grp_init(fd); 1252 + if (ret) 1253 + goto setup_failed; 1254 + 1255 + ret = init_user_ctxt(fd); 1256 + if (ret) 1257 + goto setup_failed; 1258 + 1259 + user_init(uctxt); 1260 + 1261 + return 0; 1262 + 1263 + setup_failed: 1264 + hfi1_free_ctxtdata(dd, uctxt); 1281 1265 return ret; 1282 1266 } 1283 1267 1284 - static int get_base_info(struct file *fp, void __user *ubase, __u32 len) 1268 + static int get_base_info(struct hfi1_filedata *fd, void __user *ubase, 1269 + __u32 len) 1285 1270 { 1286 1271 struct hfi1_base_info binfo; 1287 - struct hfi1_filedata *fd = fp->private_data; 1288 1272 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1289 1273 struct hfi1_devdata *dd = uctxt->dd; 1290 1274 ssize_t sz; 1291 1275 unsigned offset; 1292 1276 int ret = 0; 1293 1277 1294 - trace_hfi1_uctxtdata(uctxt->dd, uctxt); 1278 + trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt); 1295 1279 1296 1280 memset(&binfo, 0, sizeof(binfo)); 1297 1281 binfo.hw_version = dd->revision; ··· 1451 1443 * overflow conditions. start_stop==1 re-enables, to be used to 1452 1444 * re-init the software copy of the head register 1453 1445 */ 1454 - static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt, 1446 + static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, 1455 1447 int start_stop) 1456 1448 { 1457 1449 struct hfi1_devdata *dd = uctxt->dd; ··· 1486 1478 * User process then performs actions appropriate to bit having been 1487 1479 * set, if desired, and checks again in future. 
1488 1480 */ 1489 - static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt, 1481 + static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt, 1490 1482 unsigned long events) 1491 1483 { 1492 1484 int i; ··· 1507 1499 return 0; 1508 1500 } 1509 1501 1510 - static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt, 1511 - u16 pkey) 1502 + static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey) 1512 1503 { 1513 1504 int ret = -ENOENT, i, intable = 0; 1514 1505 struct hfi1_pportdata *ppd = uctxt->ppd;
+50 -55
drivers/infiniband/hw/hfi1/hfi.h
··· 196 196 void *rcvhdrq; 197 197 /* kernel virtual address where hdrqtail is updated */ 198 198 volatile __le64 *rcvhdrtail_kvaddr; 199 - /* 200 - * Shared page for kernel to signal user processes that send buffers 201 - * need disarming. The process should call HFI1_CMD_DISARM_BUFS 202 - * or HFI1_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set. 203 - */ 204 - unsigned long *user_event_mask; 205 199 /* when waiting for rcv or pioavail */ 206 200 wait_queue_head_t wait; 207 201 /* rcvhdrq size (for freeing) */ ··· 218 224 * (ignoring forks, dup, etc. for now) 219 225 */ 220 226 int cnt; 221 - /* 222 - * how much space to leave at start of eager TID entries for 223 - * protocol use, on each TID 224 - */ 225 - /* instead of calculating it */ 227 + /* Device context index */ 226 228 unsigned ctxt; 227 - /* non-zero if ctxt is being shared. */ 229 + /* 230 + * non-zero if ctxt can be shared, and defines the maximum number of 231 + * sub-contexts for this device context. 232 + */ 228 233 u16 subctxt_cnt; 229 234 /* non-zero if ctxt is being shared. 
*/ 230 235 u16 subctxt_id; ··· 281 288 void *subctxt_rcvegrbuf; 282 289 /* An array of pages for the eager header queue entries * N */ 283 290 void *subctxt_rcvhdr_base; 291 + /* Bitmask of in use context(s) */ 292 + DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS); 284 293 /* The version of the library which opened this ctxt */ 285 294 u32 userversion; 286 - /* Bitmask of active slaves */ 287 - u32 active_slaves; 288 295 /* Type of packets or conditions we want to poll for */ 289 296 u16 poll_type; 290 297 /* receive packet sequence counter */ ··· 1231 1238 1232 1239 /* Private data for file operations */ 1233 1240 struct hfi1_filedata { 1241 + struct hfi1_devdata *dd; 1234 1242 struct hfi1_ctxtdata *uctxt; 1235 - unsigned subctxt; 1236 1243 struct hfi1_user_sdma_comp_q *cq; 1237 1244 struct hfi1_user_sdma_pkt_q *pq; 1245 + u16 subctxt; 1238 1246 /* for cpu affinity; -1 if none */ 1239 1247 int rec_cpu_num; 1240 1248 u32 tid_n_pinned; ··· 1257 1263 extern u32 hfi1_cpulist_count; 1258 1264 extern unsigned long *hfi1_cpulist; 1259 1265 1260 - int hfi1_init(struct hfi1_devdata *, int); 1261 - int hfi1_count_units(int *npresentp, int *nupp); 1266 + int hfi1_init(struct hfi1_devdata *dd, int reinit); 1262 1267 int hfi1_count_active_units(void); 1263 1268 1264 - int hfi1_diag_add(struct hfi1_devdata *); 1265 - void hfi1_diag_remove(struct hfi1_devdata *); 1269 + int hfi1_diag_add(struct hfi1_devdata *dd); 1270 + void hfi1_diag_remove(struct hfi1_devdata *dd); 1266 1271 void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup); 1267 1272 1268 1273 void handle_user_interrupt(struct hfi1_ctxtdata *rcd); 1269 1274 1270 - int hfi1_create_rcvhdrq(struct hfi1_devdata *, struct hfi1_ctxtdata *); 1271 - int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *); 1275 + int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd); 1276 + int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd); 1272 1277 int hfi1_create_ctxts(struct hfi1_devdata *dd); 1273 - struct 
hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32, int); 1274 - void hfi1_init_pportdata(struct pci_dev *, struct hfi1_pportdata *, 1275 - struct hfi1_devdata *, u8, u8); 1276 - void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *); 1278 + struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, 1279 + int numa); 1280 + void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, 1281 + struct hfi1_devdata *dd, u8 hw_pidx, u8 port); 1282 + void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd); 1277 1283 1278 - int handle_receive_interrupt(struct hfi1_ctxtdata *, int); 1279 - int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int); 1280 - int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int); 1284 + int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread); 1285 + int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread); 1286 + int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread); 1281 1287 void set_all_slowpath(struct hfi1_devdata *dd); 1282 1288 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd); 1283 1289 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd); ··· 1574 1580 1575 1581 u32 lrh_max_header_bytes(struct hfi1_devdata *dd); 1576 1582 int mtu_to_enum(u32 mtu, int default_if_bad); 1577 - u16 enum_to_mtu(int); 1583 + u16 enum_to_mtu(int mtu); 1578 1584 static inline int valid_ib_mtu(unsigned int mtu) 1579 1585 { 1580 1586 return mtu == 256 || mtu == 512 || ··· 1588 1594 (valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240); 1589 1595 } 1590 1596 1591 - int set_mtu(struct hfi1_pportdata *); 1597 + int set_mtu(struct hfi1_pportdata *ppd); 1592 1598 1593 - int hfi1_set_lid(struct hfi1_pportdata *, u32, u8); 1594 - void hfi1_disable_after_error(struct hfi1_devdata *); 1595 - int hfi1_set_uevent_bits(struct hfi1_pportdata *, const int); 1596 - int hfi1_rcvbuf_validate(u32, u8, u16 
*); 1599 + int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc); 1600 + void hfi1_disable_after_error(struct hfi1_devdata *dd); 1601 + int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit); 1602 + int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode); 1597 1603 1598 - int fm_get_table(struct hfi1_pportdata *, int, void *); 1599 - int fm_set_table(struct hfi1_pportdata *, int, void *); 1604 + int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); 1605 + int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); 1600 1606 1601 1607 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); 1602 1608 void reset_link_credits(struct hfi1_devdata *dd); ··· 1718 1724 #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1) 1719 1725 1720 1726 /* ctxt_flag bit offsets */ 1721 - /* context has been setup */ 1722 - #define HFI1_CTXT_SETUP_DONE 1 1727 + /* base context has not finished initializing */ 1728 + #define HFI1_CTXT_BASE_UNINIT 1 1729 + /* base context initaliation failed */ 1730 + #define HFI1_CTXT_BASE_FAILED 2 1723 1731 /* waiting for a packet to arrive */ 1724 - #define HFI1_CTXT_WAITING_RCV 2 1725 - /* master has not finished initializing */ 1726 - #define HFI1_CTXT_MASTER_UNINIT 4 1732 + #define HFI1_CTXT_WAITING_RCV 3 1727 1733 /* waiting for an urgent packet to arrive */ 1728 - #define HFI1_CTXT_WAITING_URG 5 1734 + #define HFI1_CTXT_WAITING_URG 4 1729 1735 1730 1736 /* free up any allocated data at closes */ 1731 - struct hfi1_devdata *hfi1_init_dd(struct pci_dev *, 1732 - const struct pci_device_id *); 1733 - void hfi1_free_devdata(struct hfi1_devdata *); 1737 + struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, 1738 + const struct pci_device_id *ent); 1739 + void hfi1_free_devdata(struct hfi1_devdata *dd); 1734 1740 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); 1735 1741 1736 1742 /* LED beaconing functions */ ··· 1805 1811 1806 1812 extern const char ib_hfi1_version[]; 1807 1813 
1808 - int hfi1_device_create(struct hfi1_devdata *); 1809 - void hfi1_device_remove(struct hfi1_devdata *); 1814 + int hfi1_device_create(struct hfi1_devdata *dd); 1815 + void hfi1_device_remove(struct hfi1_devdata *dd); 1810 1816 1811 1817 int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, 1812 1818 struct kobject *kobj); 1813 - int hfi1_verbs_register_sysfs(struct hfi1_devdata *); 1814 - void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *); 1819 + int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd); 1820 + void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd); 1815 1821 /* Hook for sysfs read of QSFP */ 1816 1822 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len); 1817 1823 1818 - int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); 1819 - void hfi1_pcie_cleanup(struct pci_dev *); 1820 - int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); 1824 + int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent); 1825 + void hfi1_pcie_cleanup(struct pci_dev *pdev); 1826 + int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev); 1821 1827 void hfi1_pcie_ddcleanup(struct hfi1_devdata *); 1822 - int pcie_speeds(struct hfi1_devdata *); 1823 - void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *); 1824 - void hfi1_enable_intx(struct pci_dev *); 1828 + int pcie_speeds(struct hfi1_devdata *dd); 1829 + void request_msix(struct hfi1_devdata *dd, u32 *nent, 1830 + struct hfi1_msix_entry *entry); 1831 + void hfi1_enable_intx(struct pci_dev *pdev); 1825 1832 void restore_pci_variables(struct hfi1_devdata *dd); 1826 1833 int do_pcie_gen3_transition(struct hfi1_devdata *dd); 1827 1834 int parse_platform_config(struct hfi1_devdata *dd);
+14 -19
drivers/infiniband/hw/hfi1/init.c
··· 53 53 #include <linux/module.h> 54 54 #include <linux/printk.h> 55 55 #include <linux/hrtimer.h> 56 + #include <linux/bitmap.h> 56 57 #include <rdma/rdma_vt.h> 57 58 58 59 #include "hfi.h" ··· 71 70 #undef pr_fmt 72 71 #define pr_fmt(fmt) DRIVER_NAME ": " fmt 73 72 73 + #define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5 74 74 /* 75 75 * min buffers we want to have per context, after driver 76 76 */ ··· 103 101 module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); 104 102 MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers"); 105 103 106 - static uint eager_buffer_size = (2 << 20); /* 2MB */ 104 + static uint eager_buffer_size = (8 << 20); /* 8MB */ 107 105 module_param(eager_buffer_size, uint, S_IRUGO); 108 - MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB"); 106 + MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB"); 109 107 110 108 static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */ 111 109 module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO); ··· 119 117 module_param(user_credit_return_threshold, uint, S_IRUGO); 120 118 MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)"); 121 119 122 - static inline u64 encode_rcv_header_entry_size(u16); 120 + static inline u64 encode_rcv_header_entry_size(u16 size); 123 121 124 122 static struct idr hfi1_unit_table; 125 123 u32 hfi1_cpulist_count; ··· 177 175 goto nomem; 178 176 } 179 177 180 - ret = hfi1_init_ctxt(rcd->sc); 181 - if (ret < 0) { 182 - dd_dev_err(dd, 183 - "Failed to setup kernel receive context, failing\n"); 184 - ret = -EFAULT; 185 - goto bail; 186 - } 178 + hfi1_init_ctxt(rcd->sc); 187 179 } 188 180 189 181 /* ··· 189 193 return 0; 190 194 nomem: 191 195 ret = -ENOMEM; 192 - bail: 196 + 193 197 if (dd->rcd) { 194 198 for (i = 0; i < 
dd->num_rcv_contexts; ++i) 195 199 hfi1_free_ctxtdata(dd, dd->rcd[i]); ··· 223 227 INIT_LIST_HEAD(&rcd->qp_wait_list); 224 228 rcd->ppd = ppd; 225 229 rcd->dd = dd; 226 - rcd->cnt = 1; 230 + __set_bit(0, rcd->in_use_ctxts); 227 231 rcd->ctxt = ctxt; 228 232 dd->rcd[ctxt] = rcd; 229 233 rcd->numa_id = numa; ··· 619 623 alloc_workqueue( 620 624 "hfi%d_%d", 621 625 WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 622 - dd->num_sdma, 626 + HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES, 623 627 dd->unit, pidx); 624 628 if (!ppd->hfi1_wq) 625 629 goto wq_error; ··· 964 968 kfree(rcd->egrbufs.buffers); 965 969 966 970 sc_free(rcd->sc); 967 - vfree(rcd->user_event_mask); 968 971 vfree(rcd->subctxt_uregbase); 969 972 vfree(rcd->subctxt_rcvegrbuf); 970 973 vfree(rcd->subctxt_rcvhdr_base); ··· 1682 1687 dd_dev_err(dd, 1683 1688 "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", 1684 1689 rcd->ctxt); 1685 - vfree(rcd->user_event_mask); 1686 - rcd->user_event_mask = NULL; 1687 1690 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, 1688 1691 rcd->rcvhdrq_dma); 1689 1692 rcd->rcvhdrq = NULL; ··· 1770 1777 !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { 1771 1778 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", 1772 1779 rcd->ctxt); 1780 + ret = -ENOMEM; 1773 1781 goto bail_rcvegrbuf_phys; 1774 1782 } 1775 1783 ··· 1848 1854 "ctxt%u: current Eager buffer size is invalid %u\n", 1849 1855 rcd->ctxt, rcd->egrbufs.rcvtid_size); 1850 1856 ret = -EINVAL; 1851 - goto bail; 1857 + goto bail_rcvegrbuf_phys; 1852 1858 } 1853 1859 1854 1860 for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { ··· 1856 1862 rcd->egrbufs.rcvtids[idx].dma, order); 1857 1863 cond_resched(); 1858 1864 } 1859 - goto bail; 1865 + 1866 + return 0; 1860 1867 1861 1868 bail_rcvegrbuf_phys: 1862 1869 for (idx = 0; idx < rcd->egrbufs.alloced && ··· 1871 1876 rcd->egrbufs.buffers[idx].dma = 0; 1872 1877 rcd->egrbufs.buffers[idx].len = 0; 1873 1878 } 1874 - bail: 1879 + 1875 1880 return ret; 1876 1881 }
+2 -1
drivers/infiniband/hw/hfi1/intr.c
··· 47 47 48 48 #include <linux/pci.h> 49 49 #include <linux/delay.h> 50 + #include <linux/bitmap.h> 50 51 51 52 #include "hfi.h" 52 53 #include "common.h" ··· 190 189 unsigned long flags; 191 190 192 191 spin_lock_irqsave(&dd->uctxt_lock, flags); 193 - if (!rcd->cnt) 192 + if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) 194 193 goto done; 195 194 196 195 if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
+1 -3
drivers/infiniband/hw/hfi1/qp.c
··· 731 731 732 732 void notify_qp_reset(struct rvt_qp *qp) 733 733 { 734 - struct hfi1_qp_priv *priv = qp->priv; 735 - 736 - priv->r_adefered = 0; 734 + qp->r_adefered = 0; 737 735 clear_ahg(qp); 738 736 } 739 737
+4 -9
drivers/infiniband/hw/hfi1/rc.c
··· 727 727 struct ib_header hdr; 728 728 struct ib_other_headers *ohdr; 729 729 unsigned long flags; 730 - struct hfi1_qp_priv *priv = qp->priv; 731 730 732 731 /* clear the defer count */ 733 - priv->r_adefered = 0; 732 + qp->r_adefered = 0; 734 733 735 734 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ 736 735 if (qp->s_flags & RVT_S_RESP_PENDING) ··· 1603 1604 1604 1605 static inline void rc_cancel_ack(struct rvt_qp *qp) 1605 1606 { 1606 - struct hfi1_qp_priv *priv = qp->priv; 1607 - 1608 - priv->r_adefered = 0; 1607 + qp->r_adefered = 0; 1609 1608 if (list_empty(&qp->rspwait)) 1610 1609 return; 1611 1610 list_del_init(&qp->rspwait); ··· 2311 2314 qp->r_nak_state = 0; 2312 2315 /* Send an ACK if requested or required. */ 2313 2316 if (psn & IB_BTH_REQ_ACK) { 2314 - struct hfi1_qp_priv *priv = qp->priv; 2315 - 2316 2317 if (packet->numpkt == 0) { 2317 2318 rc_cancel_ack(qp); 2318 2319 goto send_ack; 2319 2320 } 2320 - if (priv->r_adefered >= HFI1_PSN_CREDIT) { 2321 + if (qp->r_adefered >= HFI1_PSN_CREDIT) { 2321 2322 rc_cancel_ack(qp); 2322 2323 goto send_ack; 2323 2324 } ··· 2323 2328 rc_cancel_ack(qp); 2324 2329 goto send_ack; 2325 2330 } 2326 - priv->r_adefered++; 2331 + qp->r_adefered++; 2327 2332 rc_defered_ack(rcd, qp); 2328 2333 } 2329 2334 return;
+50 -30
drivers/infiniband/hw/hfi1/ruc.c
··· 800 800 /* when sending, force a reschedule every one of these periods */ 801 801 #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */ 802 802 803 + /** 804 + * schedule_send_yield - test for a yield required for QP send engine 805 + * @timeout: Final time for timeout slice for jiffies 806 + * @qp: a pointer to QP 807 + * @ps: a pointer to a structure with commonly lookup values for 808 + * the the send engine progress 809 + * 810 + * This routine checks if the time slice for the QP has expired 811 + * for RC QPs, if so an additional work entry is queued. At this 812 + * point, other QPs have an opportunity to be scheduled. It 813 + * returns true if a yield is required, otherwise, false 814 + * is returned. 815 + */ 816 + static bool schedule_send_yield(struct rvt_qp *qp, 817 + struct hfi1_pkt_state *ps) 818 + { 819 + if (unlikely(time_after(jiffies, ps->timeout))) { 820 + if (!ps->in_thread || 821 + workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) { 822 + spin_lock_irqsave(&qp->s_lock, ps->flags); 823 + qp->s_flags &= ~RVT_S_BUSY; 824 + hfi1_schedule_send(qp); 825 + spin_unlock_irqrestore(&qp->s_lock, ps->flags); 826 + this_cpu_inc(*ps->ppd->dd->send_schedule); 827 + trace_hfi1_rc_expired_time_slice(qp, true); 828 + return true; 829 + } 830 + 831 + cond_resched(); 832 + this_cpu_inc(*ps->ppd->dd->send_schedule); 833 + ps->timeout = jiffies + ps->timeout_int; 834 + } 835 + 836 + trace_hfi1_rc_expired_time_slice(qp, false); 837 + return false; 838 + } 839 + 803 840 void hfi1_do_send_from_rvt(struct rvt_qp *qp) 804 841 { 805 842 hfi1_do_send(qp, false); ··· 864 827 struct hfi1_pkt_state ps; 865 828 struct hfi1_qp_priv *priv = qp->priv; 866 829 int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps); 867 - unsigned long timeout; 868 - unsigned long timeout_int; 869 - int cpu; 870 830 871 831 ps.dev = to_idev(qp->ibqp.device); 872 832 ps.ibp = to_iport(qp->ibqp.device, qp->port_num); 873 833 ps.ppd = ppd_from_ibp(ps.ibp); 834 + ps.in_thread = in_thread; 
835 + 836 + trace_hfi1_rc_do_send(qp, in_thread); 874 837 875 838 switch (qp->ibqp.qp_type) { 876 839 case IB_QPT_RC: ··· 881 844 return; 882 845 } 883 846 make_req = hfi1_make_rc_req; 884 - timeout_int = (qp->timeout_jiffies); 847 + ps.timeout_int = qp->timeout_jiffies; 885 848 break; 886 849 case IB_QPT_UC: 887 850 if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ··· 891 854 return; 892 855 } 893 856 make_req = hfi1_make_uc_req; 894 - timeout_int = SEND_RESCHED_TIMEOUT; 857 + ps.timeout_int = SEND_RESCHED_TIMEOUT; 895 858 break; 896 859 default: 897 860 make_req = hfi1_make_ud_req; 898 - timeout_int = SEND_RESCHED_TIMEOUT; 861 + ps.timeout_int = SEND_RESCHED_TIMEOUT; 899 862 } 900 863 901 864 spin_lock_irqsave(&qp->s_lock, ps.flags); ··· 908 871 909 872 qp->s_flags |= RVT_S_BUSY; 910 873 911 - timeout = jiffies + (timeout_int) / 8; 912 - cpu = priv->s_sde ? priv->s_sde->cpu : 874 + ps.timeout_int = ps.timeout_int / 8; 875 + ps.timeout = jiffies + ps.timeout_int; 876 + ps.cpu = priv->s_sde ? priv->s_sde->cpu : 913 877 cpumask_first(cpumask_of_node(ps.ppd->dd->node)); 878 + 914 879 /* insure a pre-built packet is handled */ 915 880 ps.s_txreq = get_waiting_verbs_txreq(qp); 916 881 do { ··· 928 889 /* Record that s_ahg is empty. 
*/ 929 890 qp->s_hdrwords = 0; 930 891 /* allow other tasks to run */ 931 - if (unlikely(time_after(jiffies, timeout))) { 932 - if (!in_thread || 933 - workqueue_congested( 934 - cpu, 935 - ps.ppd->hfi1_wq)) { 936 - spin_lock_irqsave( 937 - &qp->s_lock, 938 - ps.flags); 939 - qp->s_flags &= ~RVT_S_BUSY; 940 - hfi1_schedule_send(qp); 941 - spin_unlock_irqrestore( 942 - &qp->s_lock, 943 - ps.flags); 944 - this_cpu_inc( 945 - *ps.ppd->dd->send_schedule); 946 - return; 947 - } 948 - cond_resched(); 949 - this_cpu_inc( 950 - *ps.ppd->dd->send_schedule); 951 - timeout = jiffies + (timeout_int) / 8; 952 - } 892 + if (schedule_send_yield(qp, &ps)) 893 + return; 894 + 953 895 spin_lock_irqsave(&qp->s_lock, ps.flags); 954 896 } 955 897 } while (make_req(qp, &ps));
+12 -5
drivers/infiniband/hw/hfi1/trace_ctxts.h
··· 57 57 58 58 #define UCTXT_FMT \ 59 59 "cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \ 60 - "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx" 60 + "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx, subctxt_cnt:%u" 61 61 TRACE_EVENT(hfi1_uctxtdata, 62 - TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt), 63 - TP_ARGS(dd, uctxt), 62 + TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt, 63 + unsigned int subctxt), 64 + TP_ARGS(dd, uctxt, subctxt), 64 65 TP_STRUCT__entry(DD_DEV_ENTRY(dd) 65 66 __field(unsigned int, ctxt) 67 + __field(unsigned int, subctxt) 66 68 __field(u32, credits) 67 69 __field(u64, hw_free) 68 70 __field(void __iomem *, piobase) ··· 72 70 __field(u64, rcvhdrq_dma) 73 71 __field(u32, eager_cnt) 74 72 __field(u64, rcvegr_dma) 73 + __field(unsigned int, subctxt_cnt) 75 74 ), 76 75 TP_fast_assign(DD_DEV_ASSIGN(dd); 77 76 __entry->ctxt = uctxt->ctxt; 77 + __entry->subctxt = subctxt; 78 78 __entry->credits = uctxt->sc->credits; 79 79 __entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free); 80 80 __entry->piobase = uctxt->sc->base_addr; ··· 84 80 __entry->rcvhdrq_dma = uctxt->rcvhdrq_dma; 85 81 __entry->eager_cnt = uctxt->egrbufs.alloced; 86 82 __entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma; 83 + __entry->subctxt_cnt = uctxt->subctxt_cnt; 87 84 ), 88 - TP_printk("[%s] ctxt %u " UCTXT_FMT, 85 + TP_printk("[%s] ctxt %u:%u " UCTXT_FMT, 89 86 __get_str(dev), 90 87 __entry->ctxt, 88 + __entry->subctxt, 91 89 __entry->credits, 92 90 __entry->hw_free, 93 91 __entry->piobase, 94 92 __entry->rcvhdrq_cnt, 95 93 __entry->rcvhdrq_dma, 96 94 __entry->eager_cnt, 97 - __entry->rcvegr_dma 95 + __entry->rcvegr_dma, 96 + __entry->subctxt_cnt 98 97 ) 99 98 ); 100 99
+34
drivers/infiniband/hw/hfi1/trace_tx.h
··· 676 676 ) 677 677 ); 678 678 679 + DECLARE_EVENT_CLASS( 680 + hfi1_do_send_template, 681 + TP_PROTO(struct rvt_qp *qp, bool flag), 682 + TP_ARGS(qp, flag), 683 + TP_STRUCT__entry( 684 + DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 685 + __field(u32, qpn) 686 + __field(bool, flag) 687 + ), 688 + TP_fast_assign( 689 + DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) 690 + __entry->qpn = qp->ibqp.qp_num; 691 + __entry->flag = flag; 692 + ), 693 + TP_printk( 694 + "[%s] qpn %x flag %d", 695 + __get_str(dev), 696 + __entry->qpn, 697 + __entry->flag 698 + ) 699 + ); 700 + 701 + DEFINE_EVENT( 702 + hfi1_do_send_template, hfi1_rc_do_send, 703 + TP_PROTO(struct rvt_qp *qp, bool flag), 704 + TP_ARGS(qp, flag) 705 + ); 706 + 707 + DEFINE_EVENT( 708 + hfi1_do_send_template, hfi1_rc_expired_time_slice, 709 + TP_PROTO(struct rvt_qp *qp, bool flag), 710 + TP_ARGS(qp, flag) 711 + ); 712 + 679 713 #endif /* __HFI1_TRACE_TX_H */ 680 714 681 715 #undef TRACE_INCLUDE_PATH
+97 -83
drivers/infiniband/hw/hfi1/user_exp_rcv.c
··· 53 53 54 54 struct tid_group { 55 55 struct list_head list; 56 - unsigned base; 56 + u32 base; 57 57 u8 size; 58 58 u8 used; 59 59 u8 map; ··· 82 82 (unsigned long)(len) - 1) & PAGE_MASK) - \ 83 83 ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT)) 84 84 85 - static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *, 86 - struct hfi1_filedata *); 87 - static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *); 88 - static int set_rcvarray_entry(struct file *, unsigned long, u32, 89 - struct tid_group *, struct page **, unsigned); 90 - static int tid_rb_insert(void *, struct mmu_rb_node *); 85 + static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, 86 + struct exp_tid_set *set, 87 + struct hfi1_filedata *fd); 88 + static u32 find_phys_blocks(struct page **pages, unsigned npages, 89 + struct tid_pageset *list); 90 + static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr, 91 + u32 rcventry, struct tid_group *grp, 92 + struct page **pages, unsigned npages); 93 + static int tid_rb_insert(void *arg, struct mmu_rb_node *node); 91 94 static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, 92 95 struct tid_rb_node *tnode); 93 - static void tid_rb_remove(void *, struct mmu_rb_node *); 94 - static int tid_rb_invalidate(void *, struct mmu_rb_node *); 95 - static int program_rcvarray(struct file *, unsigned long, struct tid_group *, 96 - struct tid_pageset *, unsigned, u16, struct page **, 97 - u32 *, unsigned *, unsigned *); 98 - static int unprogram_rcvarray(struct file *, u32, struct tid_group **); 96 + static void tid_rb_remove(void *arg, struct mmu_rb_node *node); 97 + static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode); 98 + static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr, 99 + struct tid_group *grp, struct tid_pageset *sets, 100 + unsigned start, u16 count, struct page **pages, 101 + u32 *tidlist, unsigned *tididx, unsigned *pmapped); 102 + static int 
unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, 103 + struct tid_group **grp); 99 104 static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node); 100 105 101 106 static struct mmu_rb_ops tid_rb_ops = { ··· 154 149 tid_group_add_tail(group, s2); 155 150 } 156 151 152 + int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd) 153 + { 154 + struct hfi1_ctxtdata *uctxt = fd->uctxt; 155 + struct hfi1_devdata *dd = fd->dd; 156 + u32 tidbase; 157 + u32 i; 158 + struct tid_group *grp, *gptr; 159 + 160 + exp_tid_group_init(&uctxt->tid_group_list); 161 + exp_tid_group_init(&uctxt->tid_used_list); 162 + exp_tid_group_init(&uctxt->tid_full_list); 163 + 164 + tidbase = uctxt->expected_base; 165 + for (i = 0; i < uctxt->expected_count / 166 + dd->rcv_entries.group_size; i++) { 167 + grp = kzalloc(sizeof(*grp), GFP_KERNEL); 168 + if (!grp) 169 + goto grp_failed; 170 + 171 + grp->size = dd->rcv_entries.group_size; 172 + grp->base = tidbase; 173 + tid_group_add_tail(grp, &uctxt->tid_group_list); 174 + tidbase += dd->rcv_entries.group_size; 175 + } 176 + 177 + return 0; 178 + 179 + grp_failed: 180 + list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, 181 + list) { 182 + list_del_init(&grp->list); 183 + kfree(grp); 184 + } 185 + 186 + return -ENOMEM; 187 + } 188 + 157 189 /* 158 190 * Initialize context and file private data needed for Expected 159 191 * receive caching. This needs to be done after the context has 160 192 * been configured with the eager/expected RcvEntry counts. 
161 193 */ 162 - int hfi1_user_exp_rcv_init(struct file *fp) 194 + int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd) 163 195 { 164 - struct hfi1_filedata *fd = fp->private_data; 165 196 struct hfi1_ctxtdata *uctxt = fd->uctxt; 166 197 struct hfi1_devdata *dd = uctxt->dd; 167 - unsigned tidbase; 168 - int i, ret = 0; 198 + int ret = 0; 169 199 170 200 spin_lock_init(&fd->tid_lock); 171 201 spin_lock_init(&fd->invalid_lock); 172 202 173 - if (!uctxt->subctxt_cnt || !fd->subctxt) { 174 - exp_tid_group_init(&uctxt->tid_group_list); 175 - exp_tid_group_init(&uctxt->tid_used_list); 176 - exp_tid_group_init(&uctxt->tid_full_list); 177 - 178 - tidbase = uctxt->expected_base; 179 - for (i = 0; i < uctxt->expected_count / 180 - dd->rcv_entries.group_size; i++) { 181 - struct tid_group *grp; 182 - 183 - grp = kzalloc(sizeof(*grp), GFP_KERNEL); 184 - if (!grp) { 185 - /* 186 - * If we fail here, the groups already 187 - * allocated will be freed by the close 188 - * call. 189 - */ 190 - ret = -ENOMEM; 191 - goto done; 192 - } 193 - grp->size = dd->rcv_entries.group_size; 194 - grp->base = tidbase; 195 - tid_group_add_tail(grp, &uctxt->tid_group_list); 196 - tidbase += dd->rcv_entries.group_size; 197 - } 198 - } 199 - 200 203 fd->entry_to_rb = kcalloc(uctxt->expected_count, 201 - sizeof(struct rb_node *), 202 - GFP_KERNEL); 204 + sizeof(struct rb_node *), 205 + GFP_KERNEL); 203 206 if (!fd->entry_to_rb) 204 207 return -ENOMEM; 205 208 ··· 217 204 sizeof(*fd->invalid_tids), 218 205 GFP_KERNEL); 219 206 if (!fd->invalid_tids) { 220 - ret = -ENOMEM; 221 - goto done; 207 + kfree(fd->entry_to_rb); 208 + fd->entry_to_rb = NULL; 209 + return -ENOMEM; 222 210 } 223 211 224 212 /* ··· 262 248 fd->tid_limit = uctxt->expected_count; 263 249 } 264 250 spin_unlock(&fd->tid_lock); 265 - done: 251 + 266 252 return ret; 267 253 } 268 254 269 - int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) 255 + void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt) 270 256 { 271 - struct 
hfi1_ctxtdata *uctxt = fd->uctxt; 272 257 struct tid_group *grp, *gptr; 273 258 274 - if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) 275 - return 0; 259 + list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, 260 + list) { 261 + list_del_init(&grp->list); 262 + kfree(grp); 263 + } 264 + hfi1_clear_tids(uctxt); 265 + } 266 + 267 + void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) 268 + { 269 + struct hfi1_ctxtdata *uctxt = fd->uctxt; 270 + 276 271 /* 277 272 * The notifier would have been removed when the process'es mm 278 273 * was freed. 279 274 */ 280 - if (fd->handler) 275 + if (fd->handler) { 281 276 hfi1_mmu_rb_unregister(fd->handler); 282 - 283 - kfree(fd->invalid_tids); 284 - 285 - if (!uctxt->cnt) { 277 + } else { 286 278 if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) 287 279 unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); 288 280 if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) 289 281 unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); 290 - list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, 291 - list) { 292 - list_del_init(&grp->list); 293 - kfree(grp); 294 - } 295 - hfi1_clear_tids(uctxt); 296 282 } 297 283 284 + kfree(fd->invalid_tids); 285 + fd->invalid_tids = NULL; 286 + 298 287 kfree(fd->entry_to_rb); 299 - return 0; 288 + fd->entry_to_rb = NULL; 300 289 } 301 290 302 291 /* ··· 368 351 * can fit into the group. If the group becomes fully 369 352 * used, move it to tid_full_list. 
370 353 */ 371 - int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo) 354 + int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, 355 + struct hfi1_tid_info *tinfo) 372 356 { 373 357 int ret = 0, need_group = 0, pinned; 374 - struct hfi1_filedata *fd = fp->private_data; 375 358 struct hfi1_ctxtdata *uctxt = fd->uctxt; 376 359 struct hfi1_devdata *dd = uctxt->dd; 377 360 unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets, ··· 468 451 struct tid_group *grp = 469 452 tid_group_pop(&uctxt->tid_group_list); 470 453 471 - ret = program_rcvarray(fp, vaddr, grp, pagesets, 454 + ret = program_rcvarray(fd, vaddr, grp, pagesets, 472 455 pageidx, dd->rcv_entries.group_size, 473 456 pages, tidlist, &tididx, &mapped); 474 457 /* ··· 514 497 unsigned use = min_t(unsigned, pageset_count - pageidx, 515 498 grp->size - grp->used); 516 499 517 - ret = program_rcvarray(fp, vaddr, grp, pagesets, 500 + ret = program_rcvarray(fd, vaddr, grp, pagesets, 518 501 pageidx, use, pages, tidlist, 519 502 &tididx, &mapped); 520 503 if (ret < 0) { ··· 564 547 * everything done so far so we don't leak resources. 565 548 */ 566 549 tinfo->tidlist = (unsigned long)&tidlist; 567 - hfi1_user_exp_rcv_clear(fp, tinfo); 550 + hfi1_user_exp_rcv_clear(fd, tinfo); 568 551 tinfo->tidlist = 0; 569 552 ret = -EFAULT; 570 553 goto bail; ··· 588 571 return ret > 0 ? 
0 : ret; 589 572 } 590 573 591 - int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo) 574 + int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, 575 + struct hfi1_tid_info *tinfo) 592 576 { 593 577 int ret = 0; 594 - struct hfi1_filedata *fd = fp->private_data; 595 578 struct hfi1_ctxtdata *uctxt = fd->uctxt; 596 579 u32 *tidinfo; 597 580 unsigned tididx; ··· 606 589 607 590 mutex_lock(&uctxt->exp_lock); 608 591 for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { 609 - ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL); 592 + ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL); 610 593 if (ret) { 611 594 hfi1_cdbg(TID, "Failed to unprogram rcv array %d", 612 595 ret); ··· 623 606 return ret; 624 607 } 625 608 626 - int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo) 609 + int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd, 610 + struct hfi1_tid_info *tinfo) 627 611 { 628 - struct hfi1_filedata *fd = fp->private_data; 629 612 struct hfi1_ctxtdata *uctxt = fd->uctxt; 630 613 unsigned long *ev = uctxt->dd->events + 631 614 (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) * ··· 740 723 741 724 /** 742 725 * program_rcvarray() - program an RcvArray group with receive buffers 743 - * @fp: file pointer 726 + * @fd: filedata pointer 744 727 * @vaddr: starting user virtual address 745 728 * @grp: RcvArray group 746 729 * @sets: array of struct tid_pageset holding information on physically ··· 765 748 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or 766 749 * number of RcvArray entries programmed. 
767 750 */ 768 - static int program_rcvarray(struct file *fp, unsigned long vaddr, 751 + static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr, 769 752 struct tid_group *grp, 770 753 struct tid_pageset *sets, 771 754 unsigned start, u16 count, struct page **pages, 772 755 u32 *tidlist, unsigned *tididx, unsigned *pmapped) 773 756 { 774 - struct hfi1_filedata *fd = fp->private_data; 775 757 struct hfi1_ctxtdata *uctxt = fd->uctxt; 776 758 struct hfi1_devdata *dd = uctxt->dd; 777 759 u16 idx; ··· 811 795 npages = sets[setidx].count; 812 796 pageidx = sets[setidx].idx; 813 797 814 - ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE), 798 + ret = set_rcvarray_entry(fd, vaddr + (pageidx * PAGE_SIZE), 815 799 rcventry, grp, pages + pageidx, 816 800 npages); 817 801 if (ret) ··· 833 817 return idx; 834 818 } 835 819 836 - static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, 820 + static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr, 837 821 u32 rcventry, struct tid_group *grp, 838 822 struct page **pages, unsigned npages) 839 823 { 840 824 int ret; 841 - struct hfi1_filedata *fd = fp->private_data; 842 825 struct hfi1_ctxtdata *uctxt = fd->uctxt; 843 826 struct tid_rb_node *node; 844 827 struct hfi1_devdata *dd = uctxt->dd; ··· 891 876 return 0; 892 877 } 893 878 894 - static int unprogram_rcvarray(struct file *fp, u32 tidinfo, 879 + static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, 895 880 struct tid_group **grp) 896 881 { 897 - struct hfi1_filedata *fd = fp->private_data; 898 882 struct hfi1_ctxtdata *uctxt = fd->uctxt; 899 883 struct hfi1_devdata *dd = uctxt->dd; 900 884 struct tid_rb_node *node;
+11 -6
drivers/infiniband/hw/hfi1/user_exp_rcv.h
··· 1 1 #ifndef _HFI1_USER_EXP_RCV_H 2 2 #define _HFI1_USER_EXP_RCV_H 3 3 /* 4 - * Copyright(c) 2015, 2016 Intel Corporation. 4 + * Copyright(c) 2015 - 2017 Intel Corporation. 5 5 * 6 6 * This file is provided under a dual BSD/GPLv2 license. When using or 7 7 * redistributing this file, you may do so under either license. ··· 70 70 (tid) |= EXP_TID_SET(field, (value)); \ 71 71 } while (0) 72 72 73 - int hfi1_user_exp_rcv_init(struct file *); 74 - int hfi1_user_exp_rcv_free(struct hfi1_filedata *); 75 - int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *); 76 - int hfi1_user_exp_rcv_clear(struct file *, struct hfi1_tid_info *); 77 - int hfi1_user_exp_rcv_invalid(struct file *, struct hfi1_tid_info *); 73 + void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt); 74 + int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd); 75 + int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd); 76 + void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd); 77 + int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, 78 + struct hfi1_tid_info *tinfo); 79 + int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, 80 + struct hfi1_tid_info *tinfo); 81 + int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd, 82 + struct hfi1_tid_info *tinfo); 78 83 79 84 #endif /* _HFI1_USER_EXP_RCV_H */
+93 -98
drivers/infiniband/hw/hfi1/user_sdma.c
··· 1 1 /* 2 - * Copyright(c) 2015, 2016 Intel Corporation. 2 + * Copyright(c) 2015 - 2017 Intel Corporation. 3 3 * 4 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 5 * redistributing this file, you may do so under either license. ··· 143 143 144 144 /* KDETH OM multipliers and switch over point */ 145 145 #define KDETH_OM_SMALL 4 146 + #define KDETH_OM_SMALL_SHIFT 2 146 147 #define KDETH_OM_LARGE 64 148 + #define KDETH_OM_LARGE_SHIFT 6 147 149 #define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1)) 148 150 149 151 /* Tx request flag bits */ ··· 155 153 /* SDMA request flag bits */ 156 154 #define SDMA_REQ_FOR_THREAD 1 157 155 #define SDMA_REQ_SEND_DONE 2 158 - #define SDMA_REQ_HAVE_AHG 3 159 - #define SDMA_REQ_HAS_ERROR 4 160 - #define SDMA_REQ_DONE_ERROR 5 156 + #define SDMA_REQ_HAS_ERROR 3 157 + #define SDMA_REQ_DONE_ERROR 4 161 158 162 159 #define SDMA_PKT_Q_INACTIVE BIT(0) 163 160 #define SDMA_PKT_Q_ACTIVE BIT(1) ··· 215 214 * each request will need it's own engine pointer. 216 215 */ 217 216 struct sdma_engine *sde; 218 - u8 ahg_idx; 217 + s8 ahg_idx; 219 218 u32 ahg[9]; 220 219 /* 221 220 * KDETH.Offset (Eager) field ··· 229 228 * size of the TID entry. 230 229 */ 231 230 u32 tidoffset; 232 - /* 233 - * KDETH.OM 234 - * Remember this because the header template always sets it 235 - * to 0. 236 - */ 237 - u8 omfactor; 238 231 /* 239 232 * We copy the iovs for this request (based on 240 233 * info.iovcnt). 
These are only the data vectors ··· 279 284 hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \ 280 285 (pq)->subctxt, ##__VA_ARGS__) 281 286 282 - static int user_sdma_send_pkts(struct user_sdma_request *, unsigned); 283 - static int num_user_pages(const struct iovec *); 284 - static void user_sdma_txreq_cb(struct sdma_txreq *, int); 285 - static inline void pq_update(struct hfi1_user_sdma_pkt_q *); 286 - static void user_sdma_free_request(struct user_sdma_request *, bool); 287 - static int pin_vector_pages(struct user_sdma_request *, 288 - struct user_sdma_iovec *); 289 - static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned, 290 - unsigned); 291 - static int check_header_template(struct user_sdma_request *, 292 - struct hfi1_pkt_header *, u32, u32); 293 - static int set_txreq_header(struct user_sdma_request *, 294 - struct user_sdma_txreq *, u32); 295 - static int set_txreq_header_ahg(struct user_sdma_request *, 296 - struct user_sdma_txreq *, u32); 297 - static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *, 298 - struct hfi1_user_sdma_comp_q *, 299 - u16, enum hfi1_sdma_comp_state, int); 300 - static inline u32 set_pkt_bth_psn(__be32, u8, u32); 287 + static int user_sdma_send_pkts(struct user_sdma_request *req, 288 + unsigned maxpkts); 289 + static int num_user_pages(const struct iovec *iov); 290 + static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status); 291 + static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq); 292 + static void user_sdma_free_request(struct user_sdma_request *req, bool unpin); 293 + static int pin_vector_pages(struct user_sdma_request *req, 294 + struct user_sdma_iovec *iovec); 295 + static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, 296 + unsigned start, unsigned npages); 297 + static int check_header_template(struct user_sdma_request *req, 298 + struct hfi1_pkt_header *hdr, u32 lrhlen, 299 + u32 datalen); 300 + static int set_txreq_header(struct 
user_sdma_request *req, 301 + struct user_sdma_txreq *tx, u32 datalen); 302 + static int set_txreq_header_ahg(struct user_sdma_request *req, 303 + struct user_sdma_txreq *tx, u32 len); 304 + static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, 305 + struct hfi1_user_sdma_comp_q *cq, 306 + u16 idx, enum hfi1_sdma_comp_state state, 307 + int ret); 308 + static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags); 301 309 static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len); 302 310 303 311 static int defer_packet_queue( 304 - struct sdma_engine *, 305 - struct iowait *, 306 - struct sdma_txreq *, 307 - unsigned seq); 308 - static void activate_packet_queue(struct iowait *, int); 309 - static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); 310 - static int sdma_rb_insert(void *, struct mmu_rb_node *); 312 + struct sdma_engine *sde, 313 + struct iowait *wait, 314 + struct sdma_txreq *txreq, 315 + unsigned int seq); 316 + static void activate_packet_queue(struct iowait *wait, int reason); 317 + static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, 318 + unsigned long len); 319 + static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode); 311 320 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, 312 321 void *arg2, bool *stop); 313 - static void sdma_rb_remove(void *, struct mmu_rb_node *); 314 - static int sdma_rb_invalidate(void *, struct mmu_rb_node *); 322 + static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode); 323 + static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode); 315 324 316 325 static struct mmu_rb_ops sdma_rb_ops = { 317 326 .filter = sdma_rb_filter, ··· 371 372 memset(tx, 0, sizeof(*tx)); 372 373 } 373 374 374 - int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) 375 + int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, 376 + struct hfi1_filedata *fd) 375 377 { 376 - struct hfi1_filedata *fd; 377 - int 
ret = 0; 378 + int ret = -ENOMEM; 378 379 char buf[64]; 379 380 struct hfi1_devdata *dd; 380 381 struct hfi1_user_sdma_comp_q *cq; 381 382 struct hfi1_user_sdma_pkt_q *pq; 382 383 unsigned long flags; 383 384 384 - if (!uctxt || !fp) { 385 - ret = -EBADF; 386 - goto done; 387 - } 385 + if (!uctxt || !fd) 386 + return -EBADF; 388 387 389 - fd = fp->private_data; 390 - 391 - if (!hfi1_sdma_comp_ring_size) { 392 - ret = -EINVAL; 393 - goto done; 394 - } 388 + if (!hfi1_sdma_comp_ring_size) 389 + return -EINVAL; 395 390 396 391 dd = uctxt->dd; 397 392 398 393 pq = kzalloc(sizeof(*pq), GFP_KERNEL); 399 394 if (!pq) 400 - goto pq_nomem; 401 - 402 - pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, 403 - sizeof(*pq->reqs), 404 - GFP_KERNEL); 405 - if (!pq->reqs) 406 - goto pq_reqs_nomem; 407 - 408 - pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), 409 - sizeof(*pq->req_in_use), 410 - GFP_KERNEL); 411 - if (!pq->req_in_use) 412 - goto pq_reqs_no_in_use; 395 + return -ENOMEM; 413 396 414 397 INIT_LIST_HEAD(&pq->list); 415 398 pq->dd = dd; ··· 407 426 iowait_init(&pq->busy, 0, NULL, defer_packet_queue, 408 427 activate_packet_queue, NULL); 409 428 pq->reqidx = 0; 429 + 430 + pq->reqs = kcalloc(hfi1_sdma_comp_ring_size, 431 + sizeof(*pq->reqs), 432 + GFP_KERNEL); 433 + if (!pq->reqs) 434 + goto pq_reqs_nomem; 435 + 436 + pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), 437 + sizeof(*pq->req_in_use), 438 + GFP_KERNEL); 439 + if (!pq->req_in_use) 440 + goto pq_reqs_no_in_use; 441 + 410 442 snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt, 411 443 fd->subctxt); 412 444 pq->txreq_cache = kmem_cache_create(buf, 413 - sizeof(struct user_sdma_txreq), 445 + sizeof(struct user_sdma_txreq), 414 446 L1_CACHE_BYTES, 415 447 SLAB_HWCACHE_ALIGN, 416 448 sdma_kmem_cache_ctor); ··· 432 438 uctxt->ctxt); 433 439 goto pq_txreq_nomem; 434 440 } 435 - fd->pq = pq; 441 + 436 442 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 437 443 if (!cq) 438 444 goto 
cq_nomem; ··· 443 449 goto cq_comps_nomem; 444 450 445 451 cq->nentries = hfi1_sdma_comp_ring_size; 446 - fd->cq = cq; 447 452 448 453 ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq, 449 454 &pq->handler); 450 455 if (ret) { 451 456 dd_dev_err(dd, "Failed to register with MMU %d", ret); 452 - goto done; 457 + goto pq_mmu_fail; 453 458 } 459 + 460 + fd->pq = pq; 461 + fd->cq = cq; 454 462 455 463 spin_lock_irqsave(&uctxt->sdma_qlock, flags); 456 464 list_add(&pq->list, &uctxt->sdma_queues); 457 465 spin_unlock_irqrestore(&uctxt->sdma_qlock, flags); 458 - goto done; 459 466 467 + return 0; 468 + 469 + pq_mmu_fail: 470 + vfree(cq->comps); 460 471 cq_comps_nomem: 461 472 kfree(cq); 462 473 cq_nomem: ··· 472 473 kfree(pq->reqs); 473 474 pq_reqs_nomem: 474 475 kfree(pq); 475 - fd->pq = NULL; 476 - pq_nomem: 477 - ret = -ENOMEM; 478 - done: 476 + 479 477 return ret; 480 478 } 481 479 ··· 532 536 return mapping[hash]; 533 537 } 534 538 535 - int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, 536 - unsigned long dim, unsigned long *count) 539 + int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, 540 + struct iovec *iovec, unsigned long dim, 541 + unsigned long *count) 537 542 { 538 543 int ret = 0, i; 539 - struct hfi1_filedata *fd = fp->private_data; 540 544 struct hfi1_ctxtdata *uctxt = fd->uctxt; 541 545 struct hfi1_user_sdma_pkt_q *pq = fd->pq; 542 546 struct hfi1_user_sdma_comp_q *cq = fd->cq; ··· 612 616 req->pq = pq; 613 617 req->cq = cq; 614 618 req->status = -1; 619 + req->ahg_idx = -1; 615 620 INIT_LIST_HEAD(&req->txps); 616 621 617 622 memcpy(&req->info, &info, sizeof(info)); ··· 763 766 } 764 767 765 768 /* We don't need an AHG entry if the request contains only one packet */ 766 - if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) { 767 - int ahg = sdma_ahg_alloc(req->sde); 768 - 769 - if (likely(ahg >= 0)) { 770 - req->ahg_idx = (u8)ahg; 771 - set_bit(SDMA_REQ_HAVE_AHG, &req->flags); 772 - } 773 - } 
769 + if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) 770 + req->ahg_idx = sdma_ahg_alloc(req->sde); 774 771 775 772 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); 776 773 atomic_inc(&pq->n_reqs); ··· 982 991 } 983 992 } 984 993 985 - if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) { 994 + if (req->ahg_idx >= 0) { 986 995 if (!req->seqnum) { 987 996 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]); 988 997 u32 lrhlen = get_lrh_len(req->hdr, ··· 1112 1121 * happen due to the sequential manner in which 1113 1122 * descriptors are processed. 1114 1123 */ 1115 - if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) 1124 + if (req->ahg_idx >= 0) 1116 1125 sdma_ahg_free(req->sde, req->ahg_idx); 1117 1126 } 1118 1127 return ret; ··· 1314 1323 { 1315 1324 struct hfi1_user_sdma_pkt_q *pq = req->pq; 1316 1325 struct hfi1_pkt_header *hdr = &tx->hdr; 1326 + u8 omfactor; /* KDETH.OM */ 1317 1327 u16 pbclen; 1318 1328 int ret; 1319 1329 u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen)); ··· 1392 1400 } 1393 1401 tidval = req->tids[req->tididx]; 1394 1402 } 1395 - req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >= 1396 - KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL; 1403 + omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >= 1404 + KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT : 1405 + KDETH_OM_SMALL_SHIFT; 1397 1406 /* Set KDETH.TIDCtrl based on value for this TID. */ 1398 1407 KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL, 1399 1408 EXP_TID_GET(tidval, CTRL)); ··· 1409 1416 * transfer. 
1410 1417 */ 1411 1418 SDMA_DBG(req, "TID offset %ubytes %uunits om%u", 1412 - req->tidoffset, req->tidoffset / req->omfactor, 1413 - req->omfactor != KDETH_OM_SMALL); 1419 + req->tidoffset, req->tidoffset >> omfactor, 1420 + omfactor != KDETH_OM_SMALL_SHIFT); 1414 1421 KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, 1415 - req->tidoffset / req->omfactor); 1422 + req->tidoffset >> omfactor); 1416 1423 KDETH_SET(hdr->kdeth.ver_tid_offset, OM, 1417 - req->omfactor != KDETH_OM_SMALL); 1424 + omfactor != KDETH_OM_SMALL_SHIFT); 1418 1425 } 1419 1426 done: 1420 1427 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, ··· 1426 1433 struct user_sdma_txreq *tx, u32 len) 1427 1434 { 1428 1435 int diff = 0; 1436 + u8 omfactor; /* KDETH.OM */ 1429 1437 struct hfi1_user_sdma_pkt_q *pq = req->pq; 1430 1438 struct hfi1_pkt_header *hdr = &req->hdr; 1431 1439 u16 pbclen = le16_to_cpu(hdr->pbc[0]); ··· 1478 1484 } 1479 1485 tidval = req->tids[req->tididx]; 1480 1486 } 1481 - req->omfactor = ((EXP_TID_GET(tidval, LEN) * 1487 + omfactor = ((EXP_TID_GET(tidval, LEN) * 1482 1488 PAGE_SIZE) >= 1483 - KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE : 1484 - KDETH_OM_SMALL; 1489 + KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT : 1490 + KDETH_OM_SMALL_SHIFT; 1485 1491 /* KDETH.OM and KDETH.OFFSET (TID) */ 1486 1492 AHG_HEADER_SET(req->ahg, diff, 7, 0, 16, 1487 - ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 | 1488 - ((req->tidoffset / req->omfactor) & 0x7fff))); 1493 + ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 | 1494 + ((req->tidoffset >> omfactor) 1495 + & 0x7fff))); 1489 1496 /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */ 1490 1497 val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) | 1491 1498 (EXP_TID_GET(tidval, IDX) & 0x3ff));
+12 -6
drivers/infiniband/hw/hfi1/user_sdma.h
··· 1 + #ifndef _HFI1_USER_SDMA_H 2 + #define _HFI1_USER_SDMA_H 1 3 /* 2 - * Copyright(c) 2015, 2016 Intel Corporation. 4 + * Copyright(c) 2015 - 2017 Intel Corporation. 3 5 * 4 6 * This file is provided under a dual BSD/GPLv2 license. When using or 5 7 * redistributing this file, you may do so under either license. ··· 58 56 struct hfi1_user_sdma_pkt_q { 59 57 struct list_head list; 60 58 unsigned ctxt; 61 - unsigned subctxt; 59 + u16 subctxt; 62 60 u16 n_max_reqs; 63 61 atomic_t n_reqs; 64 62 u16 reqidx; ··· 80 78 struct hfi1_sdma_comp_entry *comps; 81 79 }; 82 80 83 - int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *, struct file *); 84 - int hfi1_user_sdma_free_queues(struct hfi1_filedata *); 85 - int hfi1_user_sdma_process_request(struct file *, struct iovec *, unsigned long, 86 - unsigned long *); 81 + int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, 82 + struct hfi1_filedata *fd); 83 + int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd); 84 + int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, 85 + struct iovec *iovec, unsigned long dim, 86 + unsigned long *count); 87 + 88 + #endif /* _HFI1_USER_SDMA_H */
+4 -1
drivers/infiniband/hw/hfi1/verbs.h
··· 125 125 struct sdma_engine *s_sde; /* current sde */ 126 126 struct send_context *s_sendcontext; /* current sendcontext */ 127 127 u8 s_sc; /* SC[0..4] for next packet */ 128 - u8 r_adefered; /* number of acks defered */ 129 128 struct iowait s_iowait; 130 129 struct rvt_qp *owner; 131 130 }; ··· 139 140 struct hfi1_pportdata *ppd; 140 141 struct verbs_txreq *s_txreq; 141 142 unsigned long flags; 143 + unsigned long timeout; 144 + unsigned long timeout_int; 145 + int cpu; 146 + bool in_thread; 142 147 }; 143 148 144 149 #define HFI1_PSN_CREDIT 16
+2 -6
drivers/infiniband/hw/hfi1/vnic_main.c
··· 67 67 unsigned int rcvctrl_ops = 0; 68 68 int ret; 69 69 70 - ret = hfi1_init_ctxt(uctxt->sc); 71 - if (ret) 72 - goto done; 70 + hfi1_init_ctxt(uctxt->sc); 73 71 74 72 uctxt->do_interrupt = &handle_receive_interrupt; 75 73 ··· 79 81 ret = hfi1_setup_eagerbufs(uctxt); 80 82 if (ret) 81 83 goto done; 82 - 83 - set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); 84 84 85 85 if (uctxt->rcvhdrtail_kvaddr) 86 86 clear_rcvhdrtail(uctxt); ··· 205 209 uctxt->event_flags = 0; 206 210 207 211 hfi1_clear_tids(uctxt); 208 - hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); 212 + hfi1_clear_ctxt_pkey(dd, uctxt); 209 213 210 214 hfi1_stats.sps_ctxts--; 211 215 hfi1_free_ctxtdata(dd, uctxt);
+22
drivers/infiniband/hw/mlx5/main.c
··· 3530 3530 return num_counters; 3531 3531 } 3532 3532 3533 + static struct net_device* 3534 + mlx5_ib_alloc_rdma_netdev(struct ib_device *hca, 3535 + u8 port_num, 3536 + enum rdma_netdev_t type, 3537 + const char *name, 3538 + unsigned char name_assign_type, 3539 + void (*setup)(struct net_device *)) 3540 + { 3541 + if (type != RDMA_NETDEV_IPOIB) 3542 + return ERR_PTR(-EOPNOTSUPP); 3543 + 3544 + return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca, 3545 + name, setup); 3546 + } 3547 + 3548 + static void mlx5_ib_free_rdma_netdev(struct net_device *netdev) 3549 + { 3550 + return mlx5_rdma_netdev_free(netdev); 3551 + } 3552 + 3533 3553 static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 3534 3554 { 3535 3555 struct mlx5_ib_dev *dev; ··· 3680 3660 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3681 3661 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3682 3662 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3663 + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3664 + dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3683 3665 if (mlx5_core_is_pf(mdev)) { 3684 3666 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3685 3667 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
+1 -1
drivers/infiniband/sw/rxe/rxe_mr.c
··· 368 368 ((void *)(uintptr_t)iova) : addr; 369 369 370 370 if (crcp) 371 - crc = rxe_crc32(to_rdev(mem->pd->ibpd.device), 371 + *crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device), 372 372 *crcp, src, length); 373 373 374 374 memcpy(dest, src, length);
-1
drivers/infiniband/sw/rxe/rxe_param.h
··· 114 114 RXE_MAX_UCONTEXT = 512, 115 115 116 116 RXE_NUM_PORT = 1, 117 - RXE_NUM_COMP_VECTORS = 1, 118 117 119 118 RXE_MIN_QP_INDEX = 16, 120 119 RXE_MAX_QP_INDEX = 0x00020000,
+1 -1
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 1239 1239 dev->owner = THIS_MODULE; 1240 1240 dev->node_type = RDMA_NODE_IB_CA; 1241 1241 dev->phys_port_cnt = 1; 1242 - dev->num_comp_vectors = RXE_NUM_COMP_VECTORS; 1242 + dev->num_comp_vectors = num_possible_cpus(); 1243 1243 dev->dev.parent = rxe_dma_device(rxe); 1244 1244 dev->local_dma_lkey = 0; 1245 1245 addrconf_addr_eui48((unsigned char *)&dev->node_guid,
+59
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
··· 155 155 return -EOPNOTSUPP; 156 156 } 157 157 158 + /* Return lane speed in unit of 1e6 bit/sec */ 159 + static inline int ib_speed_enum_to_int(int speed) 160 + { 161 + switch (speed) { 162 + case IB_SPEED_SDR: 163 + return SPEED_2500; 164 + case IB_SPEED_DDR: 165 + return SPEED_5000; 166 + case IB_SPEED_QDR: 167 + case IB_SPEED_FDR10: 168 + return SPEED_10000; 169 + case IB_SPEED_FDR: 170 + return SPEED_14000; 171 + case IB_SPEED_EDR: 172 + return SPEED_25000; 173 + } 174 + 175 + return SPEED_UNKNOWN; 176 + } 177 + 178 + static int ipoib_get_link_ksettings(struct net_device *netdev, 179 + struct ethtool_link_ksettings *cmd) 180 + { 181 + struct ipoib_dev_priv *priv = netdev_priv(netdev); 182 + struct ib_port_attr attr; 183 + int ret, speed, width; 184 + 185 + if (!netif_carrier_ok(netdev)) { 186 + cmd->base.speed = SPEED_UNKNOWN; 187 + cmd->base.duplex = DUPLEX_UNKNOWN; 188 + return 0; 189 + } 190 + 191 + ret = ib_query_port(priv->ca, priv->port, &attr); 192 + if (ret < 0) 193 + return -EINVAL; 194 + 195 + speed = ib_speed_enum_to_int(attr.active_speed); 196 + width = ib_width_enum_to_int(attr.active_width); 197 + 198 + if (speed < 0 || width < 0) 199 + return -EINVAL; 200 + 201 + /* Except the following are set, the other members of 202 + * the struct ethtool_link_settings are initialized to 203 + * zero in the function __ethtool_get_link_ksettings. 204 + */ 205 + cmd->base.speed = speed * width; 206 + cmd->base.duplex = DUPLEX_FULL; 207 + 208 + cmd->base.phy_address = 0xFF; 209 + 210 + cmd->base.autoneg = AUTONEG_ENABLE; 211 + cmd->base.port = PORT_OTHER; 212 + 213 + return 0; 214 + } 215 + 158 216 static const struct ethtool_ops ipoib_ethtool_ops = { 217 + .get_link_ksettings = ipoib_get_link_ksettings, 159 218 .get_drvinfo = ipoib_get_drvinfo, 160 219 .get_coalesce = ipoib_get_coalesce, 161 220 .set_coalesce = ipoib_set_coalesce,
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 35 35 config MLX5_CORE_IPOIB 36 36 bool "Mellanox Technologies ConnectX-4 IPoIB offloads support" 37 37 depends on MLX5_CORE_EN 38 - default y 38 + default n 39 39 ---help--- 40 40 MLX5 IPoIB offloads & acceleration support.
+26 -18
drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <rdma/ib_verbs.h> 33 34 #include <linux/mlx5/fs.h> 34 35 #include "en.h" 35 36 #include "ipoib.h" ··· 360 359 return 0; 361 360 } 362 361 363 - #ifdef notusedyet 364 362 /* IPoIB RDMA netdev callbacks */ 365 363 static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca, 366 - union ib_gid *gid, u16 lid, int set_qkey) 364 + union ib_gid *gid, u16 lid, int set_qkey, 365 + u32 qkey) 367 366 { 368 367 struct mlx5e_priv *epriv = mlx5i_epriv(netdev); 369 368 struct mlx5_core_dev *mdev = epriv->mdev; ··· 375 374 if (err) 376 375 mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n", 377 376 ipriv->qp.qpn, gid->raw); 377 + 378 + if (set_qkey) { 379 + mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n", 380 + netdev->name, qkey); 381 + ipriv->qkey = qkey; 382 + } 378 383 379 384 return err; 380 385 } ··· 404 397 } 405 398 406 399 static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb, 407 - struct ib_ah *address, u32 dqpn, u32 dqkey) 400 + struct ib_ah *address, u32 dqpn) 408 401 { 409 402 struct mlx5e_priv *epriv = mlx5i_epriv(dev); 410 403 struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)]; 411 404 struct mlx5_ib_ah *mah = to_mah(address); 405 + struct mlx5i_priv *ipriv = epriv->ppriv; 412 406 413 - return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, dqkey); 407 + return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey); 414 408 } 415 - #endif 416 409 417 410 static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) 418 411 { ··· 421 414 422 415 if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { 423 416 mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n"); 424 - return -ENOTSUPP; 417 + return -EOPNOTSUPP; 425 418 } 426 419 427 420 return 0; 428 421 } 429 422 430 - static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, 431 - struct ib_device *ibdev, 432 - const char *name, 433 - void (*setup)(struct net_device *)) 423 + struct 
net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, 424 + struct ib_device *ibdev, 425 + const char *name, 426 + void (*setup)(struct net_device *)) 434 427 { 435 428 const struct mlx5e_profile *profile = &mlx5i_nic_profile; 436 429 int nch = profile->max_nch(mdev); 437 430 struct net_device *netdev; 438 431 struct mlx5i_priv *ipriv; 439 432 struct mlx5e_priv *epriv; 433 + struct rdma_netdev *rn; 440 434 int err; 441 435 442 436 if (mlx5i_check_required_hca_cap(mdev)) { ··· 472 464 mlx5e_attach_netdev(epriv); 473 465 netif_carrier_off(netdev); 474 466 475 - /* TODO: set rdma_netdev func pointers 476 - * rn = &ipriv->rn; 477 - * rn->hca = ibdev; 478 - * rn->send = mlx5i_xmit; 479 - * rn->attach_mcast = mlx5i_attach_mcast; 480 - * rn->detach_mcast = mlx5i_detach_mcast; 481 - */ 467 + /* set rdma_netdev func pointers */ 468 + rn = &ipriv->rn; 469 + rn->hca = ibdev; 470 + rn->send = mlx5i_xmit; 471 + rn->attach_mcast = mlx5i_attach_mcast; 472 + rn->detach_mcast = mlx5i_detach_mcast; 473 + 482 474 return netdev; 483 475 484 476 err_free_netdev: ··· 490 482 } 491 483 EXPORT_SYMBOL(mlx5_rdma_netdev_alloc); 492 484 493 - static void mlx5_rdma_netdev_free(struct net_device *netdev) 485 + void mlx5_rdma_netdev_free(struct net_device *netdev) 494 486 { 495 487 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 496 488 const struct mlx5e_profile *profile = priv->profile;
+2
drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
··· 40 40 41 41 /* ipoib rdma netdev's private data structure */ 42 42 struct mlx5i_priv { 43 + struct rdma_netdev rn; /* keep this first */ 43 44 struct mlx5_core_qp qp; 45 + u32 qkey; 44 46 char *mlx5e_priv[0]; 45 47 }; 46 48
+19
include/linux/mlx5/driver.h
··· 1097 1097 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); 1098 1098 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); 1099 1099 1100 + #ifndef CONFIG_MLX5_CORE_IPOIB 1101 + static inline 1102 + struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, 1103 + struct ib_device *ibdev, 1104 + const char *name, 1105 + void (*setup)(struct net_device *)) 1106 + { 1107 + return ERR_PTR(-EOPNOTSUPP); 1108 + } 1109 + 1110 + static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {} 1111 + #else 1112 + struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, 1113 + struct ib_device *ibdev, 1114 + const char *name, 1115 + void (*setup)(struct net_device *)); 1116 + void mlx5_rdma_netdev_free(struct net_device *netdev); 1117 + #endif /* CONFIG_MLX5_CORE_IPOIB */ 1118 + 1100 1119 struct mlx5_profile { 1101 1120 u64 mask; 1102 1121 u8 log_max_qp;
+1
include/rdma/rdmavt_qp.h
··· 324 324 u8 r_state; /* opcode of last packet received */ 325 325 u8 r_flags; 326 326 u8 r_head_ack_queue; /* index into s_ack_queue[] */ 327 + u8 r_adefered; /* defered ack count */ 327 328 328 329 struct list_head rspwait; /* link for waiting to respond */ 329 330
+1
include/uapi/linux/ethtool.h
··· 1494 1494 #define SPEED_2500 2500 1495 1495 #define SPEED_5000 5000 1496 1496 #define SPEED_10000 10000 1497 + #define SPEED_14000 14000 1497 1498 #define SPEED_20000 20000 1498 1499 #define SPEED_25000 25000 1499 1500 #define SPEED_40000 40000