linux: Remove unused Kernel patches

These patches are no longer referenced by any kernel expression, so remove both the patch files and their definitions in patches.nix.
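
As a sanity check (illustrative only, not part of this change), something like the following, run from the repository root, should only match the definitions that patches.nix drops below:

    git grep -nE 'p9_fixes|genksyms_fix_segfault|mac_nvme_t2' pkgs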

4 files changed, 433 deletions(-)

pkgs/os-specific/linux/kernel/genksyms-fix-segfault.patch (deleted, 19 lines)
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 88632df..ba6cfa9 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -233,11 +233,11 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
         free_list(last_enum_expr, NULL);
         last_enum_expr = NULL;
         enum_counter = 0;
-        if (!name)
-            /* Anonymous enum definition, nothing more to do */
-            return NULL;
     }

+    if (!name)
+        return NULL;
+
     h = crc32(name) % HASH_BUCKETS;
     for (sym = symtab[h]; sym; sym = sym->hash_next) {
         if (map_to_ns(sym->type) == map_to_ns(type) &&

pkgs/os-specific/linux/kernel/mac-nvme-t2.patch (deleted, 283 lines)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dd10cf78f2d3..8f006638452b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"

-#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))

 #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))

@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)

 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-    dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+    dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
             (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
     if (!nvmeq->sq_cmds)
         return;

     if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
         pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-                nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+                nvmeq->sq_cmds, SQ_SIZE(nvmeq));
     } else {
-        dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+        dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
                 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
     }
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }

 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-                int qid, int depth)
+                int qid)
 {
     struct pci_dev *pdev = to_pci_dev(dev->dev);

     if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-        nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+        nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
         if (nvmeq->sq_cmds) {
             nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
                             nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                 return 0;
             }

-            pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+            pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
         }
     }

-    nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+    nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
             &nvmeq->sq_dma_addr, GFP_KERNEL);
     if (!nvmeq->sq_cmds)
         return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
     if (dev->ctrl.queue_count > qid)
         return 0;

-    nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+    nvmeq->q_depth = depth;
+    nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
             &nvmeq->cq_dma_addr, GFP_KERNEL);
     if (!nvmeq->cqes)
         goto free_nvmeq;

-    if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+    if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
         goto free_cqdma;

     nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
     nvmeq->cq_head = 0;
     nvmeq->cq_phase = 1;
     nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-    nvmeq->q_depth = depth;
     nvmeq->qid = qid;
     dev->ctrl.queue_count++;

     return 0;

 free_cqdma:
-    dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
-            nvmeq->cq_dma_addr);
+    dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+            nvmeq->cq_dma_addr);
 free_nvmeq:
     return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
     nvmeq->cq_head = 0;
     nvmeq->cq_phase = 1;
     nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-    memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+    memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
     nvme_dbbuf_init(dev, nvmeq, qid);
     dev->online_queues++;
     wmb(); /* ensure the first interrupt sees the initialization */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc09b81fc7f4..716ebe87a2b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1986,6 +1986,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
     ctrl->ctrl_config = NVME_CC_CSS_NVM;
     ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
     ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+    /* Use default IOSQES. We'll update it later if needed */
     ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
     ctrl->ctrl_config |= NVME_CC_ENABLE;

@@ -2698,6 +2699,30 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         ctrl->hmmin = le32_to_cpu(id->hmmin);
         ctrl->hmminds = le32_to_cpu(id->hmminds);
         ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
+
+        /* Grab required IO queue size */
+        ctrl->iosqes = id->sqes & 0xf;
+        if (ctrl->iosqes < NVME_NVM_IOSQES) {
+            dev_err(ctrl->device,
+                "unsupported required IO queue size %d\n", ctrl->iosqes);
+            ret = -EINVAL;
+            goto out_free;
+        }
+        /*
+         * If our IO queue size isn't the default, update the setting
+         * in CC:IOSQES.
+         */
+        if (ctrl->iosqes != NVME_NVM_IOSQES) {
+            ctrl->ctrl_config &= ~(0xfu << NVME_CC_IOSQES_SHIFT);
+            ctrl->ctrl_config |= ctrl->iosqes << NVME_CC_IOSQES_SHIFT;
+            ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC,
+                    ctrl->ctrl_config);
+            if (ret) {
+                dev_err(ctrl->device,
+                    "error updating CC register\n");
+                goto out_free;
+            }
+        }
     }

     ret = nvme_mpath_init(ctrl, id);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 716a876119c8..34ef35fcd8a5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -244,6 +244,7 @@ struct nvme_ctrl {
     u32 hmmin;
     u32 hmminds;
     u16 hmmaxd;
+    u8 iosqes;

     /* Fabrics only */
     u16 sqsize;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8f006638452b..54b35ea4af88 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,7 +28,7 @@
 #include "trace.h"
 #include "nvme.h"

-#define SQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_command))
+#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))

 #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
@@ -162,7 +162,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
     struct nvme_dev *dev;
     spinlock_t sq_lock;
-    struct nvme_command *sq_cmds;
+    void *sq_cmds;
     /* only used for poll queues: */
     spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
     volatile struct nvme_completion *cqes;
@@ -178,6 +178,7 @@ struct nvme_queue {
     u16 last_cq_head;
     u16 qid;
     u8 cq_phase;
+    u8 sqes;
     unsigned long flags;
 #define NVMEQ_ENABLED 0
 #define NVMEQ_SQ_CMB 1
@@ -488,7 +489,8 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
                 bool write_sq)
 {
     spin_lock(&nvmeq->sq_lock);
-    memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+    memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
+            cmd, sizeof(*cmd));
     if (++nvmeq->sq_tail == nvmeq->q_depth)
         nvmeq->sq_tail = 0;
     nvme_write_sq_db(nvmeq, write_sq);
@@ -1465,6 +1467,7 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
     if (dev->ctrl.queue_count > qid)
         return 0;

+    nvmeq->sqes = qid ? dev->ctrl.iosqes : NVME_NVM_ADMSQES;
     nvmeq->q_depth = depth;
     nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
             &nvmeq->cq_dma_addr, GFP_KERNEL);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 01aa6a6c241d..7af18965fb57 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -141,6 +141,7 @@ enum {
  * (In bytes and specified as a power of two (2^n)).
  */
 #define NVME_NVM_IOSQES 6
+#define NVME_NVM_ADMSQES 6
 #define NVME_NVM_IOCQES 4

 enum {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 716ebe87a2b8..480ea24d8cf4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2701,7 +2701,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);

         /* Grab required IO queue size */
-        ctrl->iosqes = id->sqes & 0xf;
+        if (ctrl->quirks & NVME_QUIRK_128_BYTES_SQES)
+            ctrl->iosqes = 7;
+        else
+            ctrl->iosqes = id->sqes & 0xf;
         if (ctrl->iosqes < NVME_NVM_IOSQES) {
             dev_err(ctrl->device,
                 "unsupported required IO queue size %d\n", ctrl->iosqes);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 34ef35fcd8a5..b2a78d08b984 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,16 @@ enum nvme_quirks {
      * Broken Write Zeroes.
      */
     NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
+
+    /*
+     * Use only one interrupt vector for all queues
+     */
+    NVME_QUIRK_SINGLE_VECTOR = (1 << 10),
+
+    /*
+     * Use non-standard 128 bytes SQEs.
+     */
+    NVME_QUIRK_128_BYTES_SQES = (1 << 11),
 };

 /*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 54b35ea4af88..ab2358137419 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2080,6 +2080,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
     dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
     dev->io_queues[HCTX_TYPE_READ] = 0;

+    if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
+        irq_queues = 1;
+
     return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
             PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
 }
@@ -3037,6 +3040,9 @@ static const struct pci_device_id nvme_id_table[] = {
     { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
     { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+    { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
+        .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+                NVME_QUIRK_128_BYTES_SQES },
     { 0, }
 };
 MODULE_DEVICE_TABLE(pci, nvme_id_table);

pkgs/os-specific/linux/kernel/p9-fixes.patch (deleted, 85 lines)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)

     if (v9inode->qid.type != st->qid.type)
         return 0;
+
+    if (v9inode->qid.path != st->qid.path)
+        return 0;
     return 1;
 }

diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)

     if (v9inode->qid.type != st->qid.type)
         return 0;
+
+    if (v9inode->qid.path != st->qid.path)
+        return 0;
     return 1;
 }

diff --git a/net/9p/client.c b/net/9p/client.c
index 3ce672af1596..f1c8ad373f90 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
     }
again:
     /* Wait for the response */
-    err = wait_event_interruptible(*req->wq,
-            req->status >= REQ_STATUS_RCVD);
+    err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);

     /*
      * Make sure our req is coherent with regard to updates in other
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f24b25c25106..f3a4efcf1456 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
     if (err == -ENOSPC) {
         chan->ring_bufs_avail = 0;
         spin_unlock_irqrestore(&chan->lock, flags);
-        err = wait_event_interruptible(*chan->vc_wq,
-                chan->ring_bufs_avail);
+        err = wait_event_killable(*chan->vc_wq,
+                chan->ring_bufs_avail);
         if (err == -ERESTARTSYS)
             return err;

@@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
          * Other zc request to finish here
          */
         if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
-            err = wait_event_interruptible(vp_wq,
+            err = wait_event_killable(vp_wq,
                 (atomic_read(&vp_pinned) < chan->p9_max_pages));
             if (err == -ERESTARTSYS)
                 return err;
@@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
     if (err == -ENOSPC) {
         chan->ring_bufs_avail = 0;
         spin_unlock_irqrestore(&chan->lock, flags);
-        err = wait_event_interruptible(*chan->vc_wq,
-                chan->ring_bufs_avail);
+        err = wait_event_killable(*chan->vc_wq,
+                chan->ring_bufs_avail);
         if (err == -ERESTARTSYS)
             goto err_out;

@@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
     virtqueue_kick(chan->vq);
     spin_unlock_irqrestore(&chan->lock, flags);
     p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
-    err = wait_event_interruptible(*req->wq,
-            req->status >= REQ_STATUS_RCVD);
+    err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
     /*
      * Non kernel buffers are pinned, unpin them
      */

pkgs/os-specific/linux/kernel/patches.nix (46 lines removed)
@@ -29,21 +29,11 @@
       patch = ./request-key-helper-updated.patch;
     };

-  p9_fixes =
-    { name = "p9-fixes";
-      patch = ./p9-fixes.patch;
-    };
-
   modinst_arg_list_too_long =
     { name = "modinst-arglist-too-long";
       patch = ./modinst-arg-list-too-long.patch;
     };

-  genksyms_fix_segfault =
-    { name = "genksyms-fix-segfault";
-      patch = ./genksyms-fix-segfault.patch;
-    };
-
   cpu-cgroup-v2 = import ./cpu-cgroup-v2-patches;

   hardened = let
@@ -56,36 +46,6 @@
     patches = lib.importJSON ./hardened/patches.json;
   in lib.mapAttrs mkPatch patches;

-  # https://bugzilla.kernel.org/show_bug.cgi?id=197591#c6
-  iwlwifi_mvm_support_version_7_scan_req_umac_fw_command = rec {
-    name = "iwlwifi_mvm_support_version_7_scan_req_umac_fw_command";
-    patch = fetchpatch {
-      name = name + ".patch";
-      url = "https://bugzilla.kernel.org/attachment.cgi?id=260597";
-      sha256 = "09096npxpgvlwdz3pb3m9brvxh7vy0xc9z9p8hh85xyczyzcsjhr";
-    };
-  };
-
-  # https://github.com/NixOS/nixpkgs/issues/42755
-  xen-netfront_fix_mismatched_rtnl_unlock = rec {
-    name = "xen-netfront_fix_mismatched_rtnl_unlock";
-    patch = fetchpatch {
-      name = name + ".patch";
-      url = "https://github.com/torvalds/linux/commit/cb257783c2927b73614b20f915a91ff78aa6f3e8.patch";
-      sha256 = "0xhblx2j8wi3kpnfpgjjwlcwdry97ji2aaq54r3zirk5g5p72zs8";
-    };
-  };
-
-  # https://github.com/NixOS/nixpkgs/issues/42755
-  xen-netfront_update_features_after_registering_netdev = rec {
-    name = "xen-netfront_update_features_after_registering_netdev";
-    patch = fetchpatch {
-      name = name + ".patch";
-      url = "https://github.com/torvalds/linux/commit/45c8184c1bed1ca8a7f02918552063a00b909bf5.patch";
-      sha256 = "1l8xq02rd7vakxg52xm9g4zng0ald866rpgm8kjlh88mwwyjkrwv";
-    };
-  };
-
   # Adapted for Linux 5.4 from:
   # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=04896832c94aae4842100cafb8d3a73e1bed3a45
   rtl8761b_support =
@@ -96,11 +56,5 @@
   export-rt-sched-migrate = {
     name = "export-rt-sched-migrate";
     patch = ./export-rt-sched-migrate.patch;
-  };
-
-  # patches from https://lkml.org/lkml/2019/7/15/1748
-  mac_nvme_t2 = rec {
-    name = "mac_nvme_t2";
-    patch = ./mac-nvme-t2.patch;
   };
 }