Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' into next

We have a conflict in idxd driver between 'fixes' and 'next' and there
are patches dependent on this, so merge the 'fixes' branch into next

+127 -62
+2 -2
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
··· 373 373 struct axi_dma_desc *first) 374 374 { 375 375 u32 priority = chan->chip->dw->hdata->priority[chan->id]; 376 - struct axi_dma_chan_config config; 376 + struct axi_dma_chan_config config = {}; 377 377 u32 irq_mask; 378 378 u8 lms = 0; /* Select AXI0 master for LLI fetching */ 379 379 ··· 391 391 config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; 392 392 config.prior = priority; 393 393 config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; 394 - config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; 394 + config.hs_sel_src = DWAXIDMAC_HS_SEL_HW; 395 395 switch (chan->direction) { 396 396 case DMA_MEM_TO_DEV: 397 397 dw_axi_dma_set_byte_halfword(chan, true);
+1 -9
drivers/dma/dw-edma/dw-edma-pcie.c
··· 187 187 188 188 /* DMA configuration */ 189 189 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 190 - if (!err) { 190 + if (err) { 191 191 pci_err(pdev, "DMA mask 64 set failed\n"); 192 192 return err; 193 - } else { 194 - pci_err(pdev, "DMA mask 64 set failed\n"); 195 - 196 - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 197 - if (err) { 198 - pci_err(pdev, "DMA mask 32 set failed\n"); 199 - return err; 200 - } 201 193 } 202 194 203 195 /* Data structure allocation */
+1 -1
drivers/dma/idxd/irq.c
··· 318 318 INIT_WORK(&idxd->work, idxd_device_reinit); 319 319 queue_work(idxd->wq, &idxd->work); 320 320 } else { 321 - spin_lock(&idxd->dev_lock); 322 321 idxd->state = IDXD_DEV_HALTED; 323 322 idxd_wqs_quiesce(idxd); 324 323 idxd_wqs_unmap_portal(idxd); 324 + spin_lock(&idxd->dev_lock); 325 325 idxd_device_clear_state(idxd); 326 326 dev_err(&idxd->pdev->dev, 327 327 "idxd halted, need %s.\n",
+17 -1
drivers/dma/idxd/submit.c
··· 97 97 { 98 98 struct idxd_desc *d, *t, *found = NULL; 99 99 struct llist_node *head; 100 + LIST_HEAD(flist); 100 101 101 102 desc->completion->status = IDXD_COMP_DESC_ABORT; 102 103 /* ··· 112 111 found = desc; 113 112 continue; 114 113 } 115 - list_add_tail(&desc->list, &ie->work_list); 114 + 115 + if (d->completion->status) 116 + list_add_tail(&d->list, &flist); 117 + else 118 + list_add_tail(&d->list, &ie->work_list); 116 119 } 117 120 } 118 121 ··· 126 121 127 122 if (found) 128 123 idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false); 124 + 125 + /* 126 + * completing the descriptor will return desc to allocator and 127 + * the desc can be acquired by a different process and the 128 + * desc->list can be modified. Delete desc from list so the 129 + * list traversing does not get corrupted by the other process. 130 + */ 131 + list_for_each_entry_safe(d, t, &flist, list) { 132 + list_del_init(&d->list); 133 + idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true); 134 + } 129 135 } 130 136 131 137 /*
+1 -1
drivers/dma/st_fdma.c
··· 874 874 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); 875 875 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>"); 876 876 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); 877 - MODULE_ALIAS("platform: " DRIVER_NAME); 877 + MODULE_ALIAS("platform:" DRIVER_NAME);
+105 -48
drivers/dma/ti/k3-udma.c
··· 4535 4535 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4536 4536 if (IS_ERR(rm_res)) { 4537 4537 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4538 + irq_res.sets = 1; 4538 4539 } else { 4539 4540 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4540 4541 for (i = 0; i < rm_res->sets; i++) 4541 4542 udma_mark_resource_ranges(ud, ud->tchan_map, 4542 4543 &rm_res->desc[i], "tchan"); 4544 + irq_res.sets = rm_res->sets; 4543 4545 } 4544 - irq_res.sets = rm_res->sets; 4545 4546 4546 4547 /* rchan and matching default flow ranges */ 4547 4548 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4548 4549 if (IS_ERR(rm_res)) { 4549 4550 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4551 + irq_res.sets++; 4550 4552 } else { 4551 4553 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4552 4554 for (i = 0; i < rm_res->sets; i++) 4553 4555 udma_mark_resource_ranges(ud, ud->rchan_map, 4554 4556 &rm_res->desc[i], "rchan"); 4557 + irq_res.sets += rm_res->sets; 4555 4558 } 4556 4559 4557 - irq_res.sets += rm_res->sets; 4558 4560 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4561 + if (!irq_res.desc) 4562 + return -ENOMEM; 4559 4563 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4560 - for (i = 0; i < rm_res->sets; i++) { 4561 - irq_res.desc[i].start = rm_res->desc[i].start; 4562 - irq_res.desc[i].num = rm_res->desc[i].num; 4563 - irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4564 - irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4564 + if (IS_ERR(rm_res)) { 4565 + irq_res.desc[0].start = 0; 4566 + irq_res.desc[0].num = ud->tchan_cnt; 4567 + i = 1; 4568 + } else { 4569 + for (i = 0; i < rm_res->sets; i++) { 4570 + irq_res.desc[i].start = rm_res->desc[i].start; 4571 + irq_res.desc[i].num = rm_res->desc[i].num; 4572 + irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4573 + irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4574 + } 4565 4575 } 4566 4576 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4567 - for (j = 0; j < rm_res->sets; j++, i++) { 4568 - if 
(rm_res->desc[j].num) { 4569 - irq_res.desc[i].start = rm_res->desc[j].start + 4570 - ud->soc_data->oes.udma_rchan; 4571 - irq_res.desc[i].num = rm_res->desc[j].num; 4572 - } 4573 - if (rm_res->desc[j].num_sec) { 4574 - irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4575 - ud->soc_data->oes.udma_rchan; 4576 - irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4577 + if (IS_ERR(rm_res)) { 4578 + irq_res.desc[i].start = 0; 4579 + irq_res.desc[i].num = ud->rchan_cnt; 4580 + } else { 4581 + for (j = 0; j < rm_res->sets; j++, i++) { 4582 + if (rm_res->desc[j].num) { 4583 + irq_res.desc[i].start = rm_res->desc[j].start + 4584 + ud->soc_data->oes.udma_rchan; 4585 + irq_res.desc[i].num = rm_res->desc[j].num; 4586 + } 4587 + if (rm_res->desc[j].num_sec) { 4588 + irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4589 + ud->soc_data->oes.udma_rchan; 4590 + irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4591 + } 4577 4592 } 4578 4593 } 4579 4594 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); ··· 4706 4691 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4707 4692 if (IS_ERR(rm_res)) { 4708 4693 bitmap_zero(ud->bchan_map, ud->bchan_cnt); 4694 + irq_res.sets++; 4709 4695 } else { 4710 4696 bitmap_fill(ud->bchan_map, ud->bchan_cnt); 4711 4697 for (i = 0; i < rm_res->sets; i++) 4712 4698 udma_mark_resource_ranges(ud, ud->bchan_map, 4713 4699 &rm_res->desc[i], 4714 4700 "bchan"); 4701 + irq_res.sets += rm_res->sets; 4715 4702 } 4716 - irq_res.sets += rm_res->sets; 4717 4703 } 4718 4704 4719 4705 /* tchan ranges */ ··· 4722 4706 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4723 4707 if (IS_ERR(rm_res)) { 4724 4708 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4709 + irq_res.sets += 2; 4725 4710 } else { 4726 4711 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4727 4712 for (i = 0; i < rm_res->sets; i++) 4728 4713 udma_mark_resource_ranges(ud, ud->tchan_map, 4729 4714 &rm_res->desc[i], 4730 4715 "tchan"); 4716 + irq_res.sets += rm_res->sets * 2; 4731 4717 } 
4732 - irq_res.sets += rm_res->sets * 2; 4733 4718 } 4734 4719 4735 4720 /* rchan ranges */ ··· 4738 4721 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4739 4722 if (IS_ERR(rm_res)) { 4740 4723 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4724 + irq_res.sets += 2; 4741 4725 } else { 4742 4726 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4743 4727 for (i = 0; i < rm_res->sets; i++) 4744 4728 udma_mark_resource_ranges(ud, ud->rchan_map, 4745 4729 &rm_res->desc[i], 4746 4730 "rchan"); 4731 + irq_res.sets += rm_res->sets * 2; 4747 4732 } 4748 - irq_res.sets += rm_res->sets * 2; 4749 4733 } 4750 4734 4751 4735 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4736 + if (!irq_res.desc) 4737 + return -ENOMEM; 4752 4738 if (ud->bchan_cnt) { 4753 4739 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4754 - for (i = 0; i < rm_res->sets; i++) { 4755 - irq_res.desc[i].start = rm_res->desc[i].start + 4756 - oes->bcdma_bchan_ring; 4757 - irq_res.desc[i].num = rm_res->desc[i].num; 4740 + if (IS_ERR(rm_res)) { 4741 + irq_res.desc[0].start = oes->bcdma_bchan_ring; 4742 + irq_res.desc[0].num = ud->bchan_cnt; 4743 + i = 1; 4744 + } else { 4745 + for (i = 0; i < rm_res->sets; i++) { 4746 + irq_res.desc[i].start = rm_res->desc[i].start + 4747 + oes->bcdma_bchan_ring; 4748 + irq_res.desc[i].num = rm_res->desc[i].num; 4749 + } 4758 4750 } 4759 4751 } 4760 4752 if (ud->tchan_cnt) { 4761 4753 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4762 - for (j = 0; j < rm_res->sets; j++, i += 2) { 4763 - irq_res.desc[i].start = rm_res->desc[j].start + 4764 - oes->bcdma_tchan_data; 4765 - irq_res.desc[i].num = rm_res->desc[j].num; 4754 + if (IS_ERR(rm_res)) { 4755 + irq_res.desc[i].start = oes->bcdma_tchan_data; 4756 + irq_res.desc[i].num = ud->tchan_cnt; 4757 + irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; 4758 + irq_res.desc[i + 1].num = ud->tchan_cnt; 4759 + i += 2; 4760 + } else { 4761 + for (j = 0; j < rm_res->sets; j++, i += 2) { 4762 + irq_res.desc[i].start = 
rm_res->desc[j].start + 4763 + oes->bcdma_tchan_data; 4764 + irq_res.desc[i].num = rm_res->desc[j].num; 4766 4765 4767 - irq_res.desc[i + 1].start = rm_res->desc[j].start + 4768 - oes->bcdma_tchan_ring; 4769 - irq_res.desc[i + 1].num = rm_res->desc[j].num; 4766 + irq_res.desc[i + 1].start = rm_res->desc[j].start + 4767 + oes->bcdma_tchan_ring; 4768 + irq_res.desc[i + 1].num = rm_res->desc[j].num; 4769 + } 4770 4770 } 4771 4771 } 4772 4772 if (ud->rchan_cnt) { 4773 4773 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4774 - for (j = 0; j < rm_res->sets; j++, i += 2) { 4775 - irq_res.desc[i].start = rm_res->desc[j].start + 4776 - oes->bcdma_rchan_data; 4777 - irq_res.desc[i].num = rm_res->desc[j].num; 4774 + if (IS_ERR(rm_res)) { 4775 + irq_res.desc[i].start = oes->bcdma_rchan_data; 4776 + irq_res.desc[i].num = ud->rchan_cnt; 4777 + irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; 4778 + irq_res.desc[i + 1].num = ud->rchan_cnt; 4779 + i += 2; 4780 + } else { 4781 + for (j = 0; j < rm_res->sets; j++, i += 2) { 4782 + irq_res.desc[i].start = rm_res->desc[j].start + 4783 + oes->bcdma_rchan_data; 4784 + irq_res.desc[i].num = rm_res->desc[j].num; 4778 4785 4779 - irq_res.desc[i + 1].start = rm_res->desc[j].start + 4780 - oes->bcdma_rchan_ring; 4781 - irq_res.desc[i + 1].num = rm_res->desc[j].num; 4786 + irq_res.desc[i + 1].start = rm_res->desc[j].start + 4787 + oes->bcdma_rchan_ring; 4788 + irq_res.desc[i + 1].num = rm_res->desc[j].num; 4789 + } 4782 4790 } 4783 4791 } 4784 4792 ··· 4901 4859 if (IS_ERR(rm_res)) { 4902 4860 /* all rflows are assigned exclusively to Linux */ 4903 4861 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); 4862 + irq_res.sets = 1; 4904 4863 } else { 4905 4864 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); 4906 4865 for (i = 0; i < rm_res->sets; i++) 4907 4866 udma_mark_resource_ranges(ud, ud->rflow_in_use, 4908 4867 &rm_res->desc[i], "rflow"); 4868 + irq_res.sets = rm_res->sets; 4909 4869 } 4910 - irq_res.sets = rm_res->sets; 4911 4870 4912 4871 /* 
tflow ranges */ 4913 4872 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4914 4873 if (IS_ERR(rm_res)) { 4915 4874 /* all tflows are assigned exclusively to Linux */ 4916 4875 bitmap_zero(ud->tflow_map, ud->tflow_cnt); 4876 + irq_res.sets++; 4917 4877 } else { 4918 4878 bitmap_fill(ud->tflow_map, ud->tflow_cnt); 4919 4879 for (i = 0; i < rm_res->sets; i++) 4920 4880 udma_mark_resource_ranges(ud, ud->tflow_map, 4921 4881 &rm_res->desc[i], "tflow"); 4882 + irq_res.sets += rm_res->sets; 4922 4883 } 4923 - irq_res.sets += rm_res->sets; 4924 4884 4925 4885 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4886 + if (!irq_res.desc) 4887 + return -ENOMEM; 4926 4888 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4927 - for (i = 0; i < rm_res->sets; i++) { 4928 - irq_res.desc[i].start = rm_res->desc[i].start + 4929 - oes->pktdma_tchan_flow; 4930 - irq_res.desc[i].num = rm_res->desc[i].num; 4889 + if (IS_ERR(rm_res)) { 4890 + irq_res.desc[0].start = oes->pktdma_tchan_flow; 4891 + irq_res.desc[0].num = ud->tflow_cnt; 4892 + i = 1; 4893 + } else { 4894 + for (i = 0; i < rm_res->sets; i++) { 4895 + irq_res.desc[i].start = rm_res->desc[i].start + 4896 + oes->pktdma_tchan_flow; 4897 + irq_res.desc[i].num = rm_res->desc[i].num; 4898 + } 4931 4899 } 4932 4900 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4933 - for (j = 0; j < rm_res->sets; j++, i++) { 4934 - irq_res.desc[i].start = rm_res->desc[j].start + 4935 - oes->pktdma_rchan_flow; 4936 - irq_res.desc[i].num = rm_res->desc[j].num; 4901 + if (IS_ERR(rm_res)) { 4902 + irq_res.desc[i].start = oes->pktdma_rchan_flow; 4903 + irq_res.desc[i].num = ud->rflow_cnt; 4904 + } else { 4905 + for (j = 0; j < rm_res->sets; j++, i++) { 4906 + irq_res.desc[i].start = rm_res->desc[j].start + 4907 + oes->pktdma_rchan_flow; 4908 + irq_res.desc[i].num = rm_res->desc[j].num; 4909 + } 4937 4910 } 4938 4911 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4939 4912 kfree(irq_res.desc);