Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'overflow-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull overflow updates from Kees Cook:
"This adds the new overflow checking helpers and adds them to the
2-factor argument allocators. And this adds the saturating size
helpers and does a treewide replacement for the struct_size() usage.
Additionally this adds the overflow testing modules to make sure
everything works.

I'm still working on the treewide replacements for allocators with
"simple" multiplied arguments:

*alloc(a * b, ...) -> *alloc_array(a, b, ...)

and

*zalloc(a * b, ...) -> *calloc(a, b, ...)

as well as the more complex cases, but that's separable from this
portion of the series. I expect to have the rest sent before -rc1
closes; there are a lot of messy cases to clean up.

Summary:

- Introduce arithmetic overflow test helper functions (Rasmus)

- Use overflow helpers in 2-factor allocators (Kees, Rasmus)

- Introduce overflow test module (Rasmus, Kees)

- Introduce saturating size helper functions (Matthew, Kees)

- Treewide use of struct_size() for allocators (Kees)"

* tag 'overflow-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
treewide: Use struct_size() for devm_kmalloc() and friends
treewide: Use struct_size() for vmalloc()-family
treewide: Use struct_size() for kmalloc()-family
device: Use overflow helpers for devm_kmalloc()
mm: Use overflow helpers in kvmalloc()
mm: Use overflow helpers in kmalloc_array*()
test_overflow: Add memory allocation overflow tests
overflow.h: Add allocation size calculation helpers
test_overflow: Report test failures
test_overflow: macrofy some more, do more tests for free
lib: add runtime test of check_*_overflow functions
compiler.h: enable builtin overflow checkers and add fallback code

+916 -205
+2 -2
crypto/af_alg.c
··· 500 500 sg = sgl->sg; 501 501 502 502 if (!sg || sgl->cur >= MAX_SGL_ENTS) { 503 - sgl = sock_kmalloc(sk, sizeof(*sgl) + 504 - sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), 503 + sgl = sock_kmalloc(sk, 504 + struct_size(sgl, sg, (MAX_SGL_ENTS + 1)), 505 505 GFP_KERNEL); 506 506 if (!sgl) 507 507 return -ENOMEM;
+6 -1
drivers/base/devres.c
··· 84 84 static __always_inline struct devres * alloc_dr(dr_release_t release, 85 85 size_t size, gfp_t gfp, int nid) 86 86 { 87 - size_t tot_size = sizeof(struct devres) + size; 87 + size_t tot_size; 88 88 struct devres *dr; 89 + 90 + /* We must catch any near-SIZE_MAX cases that could overflow. */ 91 + if (unlikely(check_add_overflow(sizeof(struct devres), size, 92 + &tot_size))) 93 + return NULL; 89 94 90 95 dr = kmalloc_node_track_caller(tot_size, gfp, nid); 91 96 if (unlikely(!dr))
+4 -2
drivers/clk/bcm/clk-bcm2835-aux.c
··· 40 40 if (IS_ERR(reg)) 41 41 return PTR_ERR(reg); 42 42 43 - onecell = devm_kmalloc(dev, sizeof(*onecell) + sizeof(*onecell->hws) * 44 - BCM2835_AUX_CLOCK_COUNT, GFP_KERNEL); 43 + onecell = devm_kmalloc(dev, 44 + struct_size(onecell, hws, 45 + BCM2835_AUX_CLOCK_COUNT), 46 + GFP_KERNEL); 45 47 if (!onecell) 46 48 return -ENOMEM; 47 49 onecell->num = BCM2835_AUX_CLOCK_COUNT;
+2 -2
drivers/clk/bcm/clk-bcm2835.c
··· 2147 2147 size_t i; 2148 2148 int ret; 2149 2149 2150 - cprman = devm_kzalloc(dev, sizeof(*cprman) + 2151 - sizeof(*cprman->onecell.hws) * asize, 2150 + cprman = devm_kzalloc(dev, 2151 + struct_size(cprman, onecell.hws, asize), 2152 2152 GFP_KERNEL); 2153 2153 if (!cprman) 2154 2154 return -ENOMEM;
+2 -2
drivers/clk/bcm/clk-iproc-asiu.c
··· 197 197 if (WARN_ON(!asiu)) 198 198 return; 199 199 200 - asiu->clk_data = kzalloc(sizeof(*asiu->clk_data->hws) * num_clks + 201 - sizeof(*asiu->clk_data), GFP_KERNEL); 200 + asiu->clk_data = kzalloc(struct_size(asiu->clk_data, hws, num_clks), 201 + GFP_KERNEL); 202 202 if (WARN_ON(!asiu->clk_data)) 203 203 goto err_clks; 204 204 asiu->clk_data->num = num_clks;
+1 -2
drivers/clk/bcm/clk-iproc-pll.c
··· 744 744 if (WARN_ON(!pll)) 745 745 return; 746 746 747 - clk_data = kzalloc(sizeof(*clk_data->hws) * num_clks + 748 - sizeof(*clk_data), GFP_KERNEL); 747 + clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL); 749 748 if (WARN_ON(!clk_data)) 750 749 goto err_clk_data; 751 750 clk_data->num = num_clks;
+1 -2
drivers/clk/berlin/bg2.c
··· 509 509 u8 avpll_flags = 0; 510 510 int n, ret; 511 511 512 - clk_data = kzalloc(sizeof(*clk_data) + 513 - sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL); 512 + clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); 514 513 if (!clk_data) 515 514 return; 516 515 clk_data->num = MAX_CLKS;
+1 -2
drivers/clk/berlin/bg2q.c
··· 295 295 struct clk_hw **hws; 296 296 int n, ret; 297 297 298 - clk_data = kzalloc(sizeof(*clk_data) + 299 - sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL); 298 + clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); 300 299 if (!clk_data) 301 300 return; 302 301 clk_data->num = MAX_CLKS;
+1 -2
drivers/clk/clk-asm9260.c
··· 273 273 int n; 274 274 u32 accuracy = 0; 275 275 276 - clk_data = kzalloc(sizeof(*clk_data) + 277 - sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL); 276 + clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); 278 277 if (!clk_data) 279 278 return; 280 279 clk_data->num = MAX_CLKS;
+3 -3
drivers/clk/clk-aspeed.c
··· 627 627 if (!scu_base) 628 628 return; 629 629 630 - aspeed_clk_data = kzalloc(sizeof(*aspeed_clk_data) + 631 - sizeof(*aspeed_clk_data->hws) * ASPEED_NUM_CLKS, 632 - GFP_KERNEL); 630 + aspeed_clk_data = kzalloc(struct_size(aspeed_clk_data, hws, 631 + ASPEED_NUM_CLKS), 632 + GFP_KERNEL); 633 633 if (!aspeed_clk_data) 634 634 return; 635 635
+3 -3
drivers/clk/clk-clps711x.c
··· 54 54 if (!base) 55 55 return ERR_PTR(-ENOMEM); 56 56 57 - clps711x_clk = kzalloc(sizeof(*clps711x_clk) + 58 - sizeof(*clps711x_clk->clk_data.hws) * CLPS711X_CLK_MAX, 59 - GFP_KERNEL); 57 + clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws, 58 + CLPS711X_CLK_MAX), 59 + GFP_KERNEL); 60 60 if (!clps711x_clk) 61 61 return ERR_PTR(-ENOMEM); 62 62
+2 -2
drivers/clk/clk-efm32gg.c
··· 25 25 void __iomem *base; 26 26 struct clk_hw **hws; 27 27 28 - clk_data = kzalloc(sizeof(*clk_data) + 29 - sizeof(*clk_data->hws) * CMU_MAX_CLKS, GFP_KERNEL); 28 + clk_data = kzalloc(struct_size(clk_data, hws, CMU_MAX_CLKS), 29 + GFP_KERNEL); 30 30 31 31 if (!clk_data) 32 32 return;
+3 -3
drivers/clk/clk-gemini.c
··· 399 399 int ret; 400 400 int i; 401 401 402 - gemini_clk_data = kzalloc(sizeof(*gemini_clk_data) + 403 - sizeof(*gemini_clk_data->hws) * GEMINI_NUM_CLKS, 404 - GFP_KERNEL); 402 + gemini_clk_data = kzalloc(struct_size(gemini_clk_data, hws, 403 + GEMINI_NUM_CLKS), 404 + GFP_KERNEL); 405 405 if (!gemini_clk_data) 406 406 return; 407 407
+2 -2
drivers/clk/clk-s2mps11.c
··· 147 147 if (!s2mps11_clks) 148 148 return -ENOMEM; 149 149 150 - clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data) + 151 - sizeof(*clk_data->hws) * S2MPS11_CLKS_NUM, 150 + clk_data = devm_kzalloc(&pdev->dev, 151 + struct_size(clk_data, hws, S2MPS11_CLKS_NUM), 152 152 GFP_KERNEL); 153 153 if (!clk_data) 154 154 return -ENOMEM;
+2 -2
drivers/clk/clk-scmi.c
··· 137 137 return -EINVAL; 138 138 } 139 139 140 - clk_data = devm_kzalloc(dev, sizeof(*clk_data) + 141 - sizeof(*clk_data->hws) * count, GFP_KERNEL); 140 + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count), 141 + GFP_KERNEL); 142 142 if (!clk_data) 143 143 return -ENOMEM; 144 144
+2 -3
drivers/clk/clk-stm32h7.c
··· 1201 1201 const char *hse_clk, *lse_clk, *i2s_clk; 1202 1202 struct regmap *pdrm; 1203 1203 1204 - clk_data = kzalloc(sizeof(*clk_data) + 1205 - sizeof(*clk_data->hws) * STM32H7_MAX_CLKS, 1206 - GFP_KERNEL); 1204 + clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS), 1205 + GFP_KERNEL); 1207 1206 if (!clk_data) 1208 1207 return; 1209 1208
+2 -3
drivers/clk/clk-stm32mp1.c
··· 2060 2060 2061 2061 max_binding = data->maxbinding; 2062 2062 2063 - clk_data = kzalloc(sizeof(*clk_data) + 2064 - sizeof(*clk_data->hws) * max_binding, 2065 - GFP_KERNEL); 2063 + clk_data = kzalloc(struct_size(clk_data, hws, max_binding), 2064 + GFP_KERNEL); 2066 2065 if (!clk_data) 2067 2066 return -ENOMEM; 2068 2067
+2 -2
drivers/clk/davinci/da8xx-cfgchip.c
··· 650 650 struct da8xx_usb0_clk48 *usb0; 651 651 struct da8xx_usb1_clk48 *usb1; 652 652 653 - clk_data = devm_kzalloc(dev, sizeof(*clk_data) + 2 * 654 - sizeof(*clk_data->hws), GFP_KERNEL); 653 + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, 2), 654 + GFP_KERNEL); 655 655 if (!clk_data) 656 656 return -ENOMEM; 657 657
+4 -3
drivers/clk/mvebu/armada-37xx-periph.c
··· 667 667 if (!driver_data) 668 668 return -ENOMEM; 669 669 670 - driver_data->hw_data = devm_kzalloc(dev, sizeof(*driver_data->hw_data) + 671 - sizeof(*driver_data->hw_data->hws) * num_periph, 672 - GFP_KERNEL); 670 + driver_data->hw_data = devm_kzalloc(dev, 671 + struct_size(driver_data->hw_data, 672 + hws, num_periph), 673 + GFP_KERNEL); 673 674 if (!driver_data->hw_data) 674 675 return -ENOMEM; 675 676 driver_data->hw_data->num = num_periph;
+2 -2
drivers/clk/mvebu/armada-37xx-tbg.c
··· 91 91 void __iomem *reg; 92 92 int i, ret; 93 93 94 - hw_tbg_data = devm_kzalloc(&pdev->dev, sizeof(*hw_tbg_data) 95 - + sizeof(*hw_tbg_data->hws) * NUM_TBG, 94 + hw_tbg_data = devm_kzalloc(&pdev->dev, 95 + struct_size(hw_tbg_data, hws, NUM_TBG), 96 96 GFP_KERNEL); 97 97 if (!hw_tbg_data) 98 98 return -ENOMEM;
+1 -2
drivers/clk/qcom/clk-spmi-pmic-div.c
··· 239 239 if (!nclks) 240 240 return -EINVAL; 241 241 242 - cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*cc->clks) * nclks, 243 - GFP_KERNEL); 242 + cc = devm_kzalloc(dev, struct_size(cc, clks, nclks), GFP_KERNEL); 244 243 if (!cc) 245 244 return -ENOMEM; 246 245 cc->nclks = nclks;
+2 -2
drivers/clk/samsung/clk-exynos-audss.c
··· 149 149 epll = ERR_PTR(-ENODEV); 150 150 151 151 clk_data = devm_kzalloc(dev, 152 - sizeof(*clk_data) + 153 - sizeof(*clk_data->hws) * EXYNOS_AUDSS_MAX_CLKS, 152 + struct_size(clk_data, hws, 153 + EXYNOS_AUDSS_MAX_CLKS), 154 154 GFP_KERNEL); 155 155 if (!clk_data) 156 156 return -ENOMEM;
+1 -2
drivers/clk/samsung/clk-exynos-clkout.c
··· 61 61 int ret; 62 62 int i; 63 63 64 - clkout = kzalloc(sizeof(*clkout) + 65 - sizeof(*clkout->data.hws) * EXYNOS_CLKOUT_NR_CLKS, 64 + clkout = kzalloc(struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS), 66 65 GFP_KERNEL); 67 66 if (!clkout) 68 67 return;
+2 -2
drivers/clk/samsung/clk-exynos5433.c
··· 5505 5505 5506 5506 info = of_device_get_match_data(dev); 5507 5507 5508 - data = devm_kzalloc(dev, sizeof(*data) + 5509 - sizeof(*data->ctx.clk_data.hws) * info->nr_clk_ids, 5508 + data = devm_kzalloc(dev, 5509 + struct_size(data, ctx.clk_data.hws, info->nr_clk_ids), 5510 5510 GFP_KERNEL); 5511 5511 if (!data) 5512 5512 return -ENOMEM;
+4 -3
drivers/clk/samsung/clk-s3c2410-dclk.c
··· 247 247 struct clk_hw **clk_table; 248 248 int ret, i; 249 249 250 - s3c24xx_dclk = devm_kzalloc(&pdev->dev, sizeof(*s3c24xx_dclk) + 251 - sizeof(*s3c24xx_dclk->clk_data.hws) * DCLK_MAX_CLKS, 252 - GFP_KERNEL); 250 + s3c24xx_dclk = devm_kzalloc(&pdev->dev, 251 + struct_size(s3c24xx_dclk, clk_data.hws, 252 + DCLK_MAX_CLKS), 253 + GFP_KERNEL); 253 254 if (!s3c24xx_dclk) 254 255 return -ENOMEM; 255 256
+1 -2
drivers/clk/samsung/clk-s5pv210-audss.c
··· 81 81 } 82 82 83 83 clk_data = devm_kzalloc(&pdev->dev, 84 - sizeof(*clk_data) + 85 - sizeof(*clk_data->hws) * AUDSS_MAX_CLKS, 84 + struct_size(clk_data, hws, AUDSS_MAX_CLKS), 86 85 GFP_KERNEL); 87 86 88 87 if (!clk_data)
+1 -1
drivers/dax/device.c
··· 594 594 if (!count) 595 595 return ERR_PTR(-EINVAL); 596 596 597 - dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL); 597 + dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL); 598 598 if (!dev_dax) 599 599 return ERR_PTR(-ENOMEM); 600 600
+2 -3
drivers/dma/bcm-sba-raid.c
··· 1499 1499 1500 1500 for (i = 0; i < sba->max_req; i++) { 1501 1501 req = devm_kzalloc(sba->dev, 1502 - sizeof(*req) + 1503 - sba->max_cmd_per_req * sizeof(req->cmds[0]), 1504 - GFP_KERNEL); 1502 + struct_size(req, cmds, sba->max_cmd_per_req), 1503 + GFP_KERNEL); 1505 1504 if (!req) { 1506 1505 ret = -ENOMEM; 1507 1506 goto fail_free_cmds_pool;
+3 -6
drivers/dma/edma.c
··· 1074 1074 return NULL; 1075 1075 } 1076 1076 1077 - edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), 1078 - GFP_ATOMIC); 1077 + edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC); 1079 1078 if (!edesc) 1080 1079 return NULL; 1081 1080 ··· 1191 1192 nslots = 2; 1192 1193 } 1193 1194 1194 - edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1195 - GFP_ATOMIC); 1195 + edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC); 1196 1196 if (!edesc) 1197 1197 return NULL; 1198 1198 ··· 1313 1315 } 1314 1316 } 1315 1317 1316 - edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1317 - GFP_ATOMIC); 1318 + edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC); 1318 1319 if (!edesc) 1319 1320 return NULL; 1320 1321
+1 -1
drivers/dma/moxart-dma.c
··· 309 309 return NULL; 310 310 } 311 311 312 - d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC); 312 + d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC); 313 313 if (!d) 314 314 return NULL; 315 315
+2 -2
drivers/dma/nbpfaxi.c
··· 1305 1305 cfg = of_device_get_match_data(dev); 1306 1306 num_channels = cfg->num_channels; 1307 1307 1308 - nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * 1309 - sizeof(nbpf->chan[0]), GFP_KERNEL); 1308 + nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels), 1309 + GFP_KERNEL); 1310 1310 if (!nbpf) 1311 1311 return -ENOMEM; 1312 1312
+1 -1
drivers/dma/omap-dma.c
··· 917 917 } 918 918 919 919 /* Now allocate and setup the descriptor. */ 920 - d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); 920 + d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC); 921 921 if (!d) 922 922 return NULL; 923 923
+2 -2
drivers/dma/sa11x0-dma.c
··· 557 557 } 558 558 } 559 559 560 - txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); 560 + txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC); 561 561 if (!txd) { 562 562 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); 563 563 return NULL; ··· 627 627 if (sglen == 0) 628 628 return NULL; 629 629 630 - txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); 630 + txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC); 631 631 if (!txd) { 632 632 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); 633 633 return NULL;
+1 -1
drivers/dma/sh/usb-dmac.c
··· 269 269 struct usb_dmac_desc *desc; 270 270 unsigned long flags; 271 271 272 - desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp); 272 + desc = kzalloc(struct_size(desc, sg, sg_len), gfp); 273 273 if (!desc) 274 274 return -ENOMEM; 275 275
+2 -2
drivers/dma/sprd-dma.c
··· 805 805 return ret; 806 806 } 807 807 808 - sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) + 809 - sizeof(*dma_chn) * chn_count, 808 + sdev = devm_kzalloc(&pdev->dev, 809 + struct_size(sdev, channels, chn_count), 810 810 GFP_KERNEL); 811 811 if (!sdev) 812 812 return -ENOMEM;
+1 -2
drivers/firewire/core-topology.c
··· 112 112 { 113 113 struct fw_node *node; 114 114 115 - node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]), 116 - GFP_ATOMIC); 115 + node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC); 117 116 if (node == NULL) 118 117 return NULL; 119 118
+1 -2
drivers/gpio/gpio-uniphier.c
··· 371 371 return ret; 372 372 373 373 nregs = uniphier_gpio_get_nbanks(ngpios) * 2 + 3; 374 - priv = devm_kzalloc(dev, 375 - sizeof(*priv) + sizeof(priv->saved_vals[0]) * nregs, 374 + priv = devm_kzalloc(dev, struct_size(priv, saved_vals, nregs), 376 375 GFP_KERNEL); 377 376 if (!priv) 378 377 return -ENOMEM;
+1 -2
drivers/gpio/gpiolib.c
··· 4023 4023 if (count < 0) 4024 4024 return ERR_PTR(count); 4025 4025 4026 - descs = kzalloc(sizeof(*descs) + sizeof(descs->desc[0]) * count, 4027 - GFP_KERNEL); 4026 + descs = kzalloc(struct_size(descs, desc, count), GFP_KERNEL); 4028 4027 if (!descs) 4029 4028 return ERR_PTR(-ENOMEM); 4030 4029
+1 -2
drivers/gpu/drm/nouveau/nvkm/core/ramht.c
··· 144 144 struct nvkm_ramht *ramht; 145 145 int ret, i; 146 146 147 - if (!(ramht = *pramht = vzalloc(sizeof(*ramht) + 148 - (size >> 3) * sizeof(*ramht->data)))) 147 + if (!(ramht = *pramht = vzalloc(struct_size(ramht, data, (size >> 3))))) 149 148 return -ENOMEM; 150 149 151 150 ramht->device = device;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
··· 779 779 780 780 sdom = spec; 781 781 while (sdom->signal_nr) { 782 - dom = kzalloc(sizeof(*dom) + sdom->signal_nr * 783 - sizeof(*dom->signal), GFP_KERNEL); 782 + dom = kzalloc(struct_size(dom, signal, sdom->signal_nr), 783 + GFP_KERNEL); 784 784 if (!dom) 785 785 return -ENOMEM; 786 786
+1 -1
drivers/hwspinlock/omap_hwspinlock.c
··· 132 132 133 133 num_locks = i * 32; /* actual number of locks in this device */ 134 134 135 - bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL); 135 + bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL); 136 136 if (!bank) { 137 137 ret = -ENOMEM; 138 138 goto iounmap_base;
+4 -2
drivers/hwspinlock/sirf_hwspinlock.c
··· 62 62 if (!pdev->dev.of_node) 63 63 return -ENODEV; 64 64 65 - hwspin = devm_kzalloc(&pdev->dev, sizeof(*hwspin) + 66 - sizeof(*hwlock) * HW_SPINLOCK_NUMBER, GFP_KERNEL); 65 + hwspin = devm_kzalloc(&pdev->dev, 66 + struct_size(hwspin, bank.lock, 67 + HW_SPINLOCK_NUMBER), 68 + GFP_KERNEL); 67 69 if (!hwspin) 68 70 return -ENOMEM; 69 71
+1 -1
drivers/hwspinlock/u8500_hsem.c
··· 119 119 /* clear all interrupts */ 120 120 writel(0xFFFF, io_base + HSEM_ICRALL); 121 121 122 - bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL); 122 + bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL); 123 123 if (!bank) { 124 124 ret = -ENOMEM; 125 125 goto iounmap_base;
+3 -2
drivers/infiniband/core/cache.c
··· 1157 1157 goto err; 1158 1158 } 1159 1159 1160 - pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * 1161 - sizeof *pkey_cache->table, GFP_KERNEL); 1160 + pkey_cache = kmalloc(struct_size(pkey_cache, table, 1161 + tprops->pkey_tbl_len), 1162 + GFP_KERNEL); 1162 1163 if (!pkey_cache) 1163 1164 goto err; 1164 1165
+2 -2
drivers/infiniband/core/cm.c
··· 4298 4298 int count = 0; 4299 4299 u8 i; 4300 4300 4301 - cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * 4302 - ib_device->phys_port_cnt, GFP_KERNEL); 4301 + cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), 4302 + GFP_KERNEL); 4303 4303 if (!cm_dev) 4304 4304 return; 4305 4305
+1 -1
drivers/infiniband/core/multicast.c
··· 813 813 int i; 814 814 int count = 0; 815 815 816 - dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port, 816 + dev = kmalloc(struct_size(dev, port, device->phys_port_cnt), 817 817 GFP_KERNEL); 818 818 if (!dev) 819 819 return;
+2 -2
drivers/infiniband/core/uverbs_cmd.c
··· 2756 2756 struct ib_uflow_resources *resources; 2757 2757 2758 2758 resources = 2759 - kmalloc(sizeof(*resources) + 2760 - num_specs * sizeof(*resources->collection), GFP_KERNEL); 2759 + kmalloc(struct_size(resources, collection, num_specs), 2760 + GFP_KERNEL); 2761 2761 2762 2762 if (!resources) 2763 2763 return NULL;
+10 -11
drivers/infiniband/core/uverbs_ioctl_merge.c
··· 297 297 if (max_attr_buckets >= 0) 298 298 num_attr_buckets = max_attr_buckets + 1; 299 299 300 - method = kzalloc(sizeof(*method) + 301 - num_attr_buckets * sizeof(*method->attr_buckets), 300 + method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets), 302 301 GFP_KERNEL); 303 302 if (!method) 304 303 return ERR_PTR(-ENOMEM); ··· 445 446 if (max_method_buckets >= 0) 446 447 num_method_buckets = max_method_buckets + 1; 447 448 448 - object = kzalloc(sizeof(*object) + 449 - num_method_buckets * 450 - sizeof(*object->method_buckets), GFP_KERNEL); 449 + object = kzalloc(struct_size(object, method_buckets, 450 + num_method_buckets), 451 + GFP_KERNEL); 451 452 if (!object) 452 453 return ERR_PTR(-ENOMEM); 453 454 ··· 468 469 if (methods_max_bucket < 0) 469 470 continue; 470 471 471 - hash = kzalloc(sizeof(*hash) + 472 - sizeof(*hash->methods) * (methods_max_bucket + 1), 472 + hash = kzalloc(struct_size(hash, methods, 473 + methods_max_bucket + 1), 473 474 GFP_KERNEL); 474 475 if (!hash) { 475 476 res = -ENOMEM; ··· 578 579 if (max_object_buckets >= 0) 579 580 num_objects_buckets = max_object_buckets + 1; 580 581 581 - root_spec = kzalloc(sizeof(*root_spec) + 582 - num_objects_buckets * sizeof(*root_spec->object_buckets), 582 + root_spec = kzalloc(struct_size(root_spec, object_buckets, 583 + num_objects_buckets), 583 584 GFP_KERNEL); 584 585 if (!root_spec) 585 586 return ERR_PTR(-ENOMEM); ··· 602 603 if (objects_max_bucket < 0) 603 604 continue; 604 605 605 - hash = kzalloc(sizeof(*hash) + 606 - sizeof(*hash->objects) * (objects_max_bucket + 1), 606 + hash = kzalloc(struct_size(hash, objects, 607 + objects_max_bucket + 1), 607 608 GFP_KERNEL); 608 609 if (!hash) { 609 610 res = -ENOMEM;
+2 -2
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 367 367 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size; 368 368 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); 369 369 370 - table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); 370 + table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL); 371 371 if (!table) 372 372 return NULL; 373 373 ··· 529 529 return NULL; 530 530 531 531 npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; 532 - db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL); 532 + db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL); 533 533 if (!db_tab) 534 534 return ERR_PTR(-ENOMEM); 535 535
+2 -2
drivers/infiniband/sw/rdmavt/mr.c
··· 283 283 284 284 /* Allocate struct plus pointers to first level page tables. */ 285 285 m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; 286 - mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); 286 + mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL); 287 287 if (!mr) 288 288 goto bail; 289 289 ··· 730 730 731 731 /* Allocate struct plus pointers to first level page tables. */ 732 732 m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ; 733 - fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); 733 + fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL); 734 734 if (!fmr) 735 735 goto bail; 736 736
+1 -2
drivers/input/input-leds.c
··· 98 98 if (!num_leds) 99 99 return -ENXIO; 100 100 101 - leds = kzalloc(sizeof(*leds) + num_leds * sizeof(*leds->leds), 102 - GFP_KERNEL); 101 + leds = kzalloc(struct_size(leds, leds, num_leds), GFP_KERNEL); 103 102 if (!leds) 104 103 return -ENOMEM; 105 104
+1 -1
drivers/input/input-mt.c
··· 49 49 if (mt) 50 50 return mt->num_slots != num_slots ? -EINVAL : 0; 51 51 52 - mt = kzalloc(sizeof(*mt) + num_slots * sizeof(*mt->slots), GFP_KERNEL); 52 + mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL); 53 53 if (!mt) 54 54 goto err_mem; 55 55
+1 -2
drivers/input/keyboard/cap11xx.c
··· 357 357 } 358 358 359 359 priv = devm_kzalloc(dev, 360 - sizeof(*priv) + 361 - cap->num_channels * sizeof(priv->keycodes[0]), 360 + struct_size(priv, keycodes, cap->num_channels), 362 361 GFP_KERNEL); 363 362 if (!priv) 364 363 return -ENOMEM;
+1 -1
drivers/md/dm-raid.c
··· 756 756 return ERR_PTR(-EINVAL); 757 757 } 758 758 759 - rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); 759 + rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL); 760 760 if (!rs) { 761 761 ti->error = "Cannot allocate raid context"; 762 762 return ERR_PTR(-ENOMEM);
+5 -5
drivers/md/dm-table.c
··· 548 548 * On the other hand, dm-switch needs to process bulk data using messages and 549 549 * excessive use of GFP_NOIO could cause trouble. 550 550 */ 551 - static char **realloc_argv(unsigned *array_size, char **old_argv) 551 + static char **realloc_argv(unsigned *size, char **old_argv) 552 552 { 553 553 char **argv; 554 554 unsigned new_size; 555 555 gfp_t gfp; 556 556 557 - if (*array_size) { 558 - new_size = *array_size * 2; 557 + if (*size) { 558 + new_size = *size * 2; 559 559 gfp = GFP_KERNEL; 560 560 } else { 561 561 new_size = 8; ··· 563 563 } 564 564 argv = kmalloc(new_size * sizeof(*argv), gfp); 565 565 if (argv) { 566 - memcpy(argv, old_argv, *array_size * sizeof(*argv)); 567 - *array_size = new_size; 566 + memcpy(argv, old_argv, *size * sizeof(*argv)); 567 + *size = new_size; 568 568 } 569 569 570 570 kfree(old_argv);
+2 -2
drivers/mfd/qcom-pm8xxx.c
··· 563 563 pr_info("PMIC revision 2: %02X\n", val); 564 564 rev |= val << BITS_PER_BYTE; 565 565 566 - chip = devm_kzalloc(&pdev->dev, sizeof(*chip) + 567 - sizeof(chip->config[0]) * data->num_irqs, 566 + chip = devm_kzalloc(&pdev->dev, 567 + struct_size(chip, config, data->num_irqs), 568 568 GFP_KERNEL); 569 569 if (!chip) 570 570 return -ENOMEM;
+2 -2
drivers/misc/cb710/core.c
··· 232 232 if (val & CB710_SLOT_SM) 233 233 ++n; 234 234 235 - chip = devm_kzalloc(&pdev->dev, 236 - sizeof(*chip) + n * sizeof(*chip->slot), GFP_KERNEL); 235 + chip = devm_kzalloc(&pdev->dev, struct_size(chip, slot, n), 236 + GFP_KERNEL); 237 237 if (!chip) 238 238 return -ENOMEM; 239 239
+1 -2
drivers/misc/vexpress-syscfg.c
··· 182 182 val = energy_quirk; 183 183 } 184 184 185 - func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, 186 - GFP_KERNEL); 185 + func = kzalloc(struct_size(func, template, num), GFP_KERNEL); 187 186 if (!func) 188 187 return ERR_PTR(-ENOMEM); 189 188
+3 -2
drivers/mtd/spi-nor/aspeed-smc.c
··· 861 861 return -ENODEV; 862 862 info = match->data; 863 863 864 - controller = devm_kzalloc(&pdev->dev, sizeof(*controller) + 865 - info->nce * sizeof(controller->chips[0]), GFP_KERNEL); 864 + controller = devm_kzalloc(&pdev->dev, 865 + struct_size(controller, chips, info->nce), 866 + GFP_KERNEL); 866 867 if (!controller) 867 868 return -ENOMEM; 868 869 controller->info = info;
+1 -2
drivers/net/can/peak_canfd/peak_pciefd_main.c
··· 752 752 can_count = 1; 753 753 754 754 /* allocate board structure object */ 755 - pciefd = devm_kzalloc(&pdev->dev, sizeof(*pciefd) + 756 - can_count * sizeof(*pciefd->can), 755 + pciefd = devm_kzalloc(&pdev->dev, struct_size(pciefd, can, can_count), 757 756 GFP_KERNEL); 758 757 if (!pciefd) { 759 758 err = -ENOMEM;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
··· 494 494 int err; 495 495 int i; 496 496 497 - d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); 497 + d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL); 498 498 if (!d) 499 499 return -ENOMEM; 500 500
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1191 1191 { 1192 1192 struct mlx5_flow_handle *handle; 1193 1193 1194 - handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) * 1195 - num_rules, GFP_KERNEL); 1194 + handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL); 1196 1195 if (!handle) 1197 1196 return NULL; 1198 1197
+2 -3
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 2987 2987 2988 2988 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2989 2989 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 2990 - ptk_pn = kzalloc(sizeof(*ptk_pn) + 2991 - mvm->trans->num_rx_queues * 2992 - sizeof(ptk_pn->q[0]), 2990 + ptk_pn = kzalloc(struct_size(ptk_pn, q, 2991 + mvm->trans->num_rx_queues), 2993 2992 GFP_KERNEL); 2994 2993 if (!ptk_pn) { 2995 2994 ret = -ENOMEM;
+1 -2
drivers/net/wireless/mediatek/mt76/agg-rx.c
··· 236 236 237 237 mt76_rx_aggr_stop(dev, wcid, tidno); 238 238 239 - tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]), 240 - GFP_KERNEL); 239 + tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL); 241 240 if (!tid) 242 241 return -ENOMEM; 243 242
+2 -2
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
··· 483 483 ++nr_domains; 484 484 } 485 485 486 - data = devm_kzalloc(dev, sizeof(*data) 487 - + nr_domains * sizeof(*data->domains), GFP_KERNEL); 486 + data = devm_kzalloc(dev, struct_size(data, domains, nr_domains), 487 + GFP_KERNEL); 488 488 if (!data) 489 489 return -ENOMEM; 490 490 data->drvdata = d;
+1 -2
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
··· 759 759 760 760 nregs = DIV_ROUND_UP(count * width, 32); 761 761 762 - region = devm_kzalloc(dev, 763 - sizeof(*region) + sizeof(region->vals[0]) * nregs, 762 + region = devm_kzalloc(dev, struct_size(region, vals, nregs), 764 763 GFP_KERNEL); 765 764 if (!region) 766 765 return -ENOMEM;
+3 -3
drivers/regulator/mc13783-regulator.c
··· 409 409 if (num_regulators <= 0) 410 410 return -EINVAL; 411 411 412 - priv = devm_kzalloc(&pdev->dev, sizeof(*priv) + 413 - num_regulators * sizeof(priv->regulators[0]), 414 - GFP_KERNEL); 412 + priv = devm_kzalloc(&pdev->dev, 413 + struct_size(priv, regulators, num_regulators), 414 + GFP_KERNEL); 415 415 if (!priv) 416 416 return -ENOMEM; 417 417
+3 -3
drivers/regulator/mc13892-regulator.c
··· 547 547 if (num_regulators <= 0) 548 548 return -EINVAL; 549 549 550 - priv = devm_kzalloc(&pdev->dev, sizeof(*priv) + 551 - num_regulators * sizeof(priv->regulators[0]), 552 - GFP_KERNEL); 550 + priv = devm_kzalloc(&pdev->dev, 551 + struct_size(priv, regulators, num_regulators), 552 + GFP_KERNEL); 553 553 if (!priv) 554 554 return -ENOMEM; 555 555
+1 -2
drivers/reset/core.c
··· 730 730 if (num < 0) 731 731 return optional ? NULL : ERR_PTR(num); 732 732 733 - resets = kzalloc(sizeof(*resets) + sizeof(resets->rstc[0]) * num, 734 - GFP_KERNEL); 733 + resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL); 735 734 if (!resets) 736 735 return ERR_PTR(-ENOMEM); 737 736
+4 -4
drivers/rtc/rtc-ac100.c
··· 317 317 const char *parents[2] = {AC100_RTC_32K_NAME}; 318 318 int i, ret; 319 319 320 - chip->clk_data = devm_kzalloc(chip->dev, sizeof(*chip->clk_data) + 321 - sizeof(*chip->clk_data->hws) * 322 - AC100_CLKOUT_NUM, 323 - GFP_KERNEL); 320 + chip->clk_data = devm_kzalloc(chip->dev, 321 + struct_size(chip->clk_data, hws, 322 + AC100_CLKOUT_NUM), 323 + GFP_KERNEL); 324 324 if (!chip->clk_data) 325 325 return -ENOMEM; 326 326
+1 -2
drivers/s390/cio/ccwgroup.c
··· 326 326 if (num_devices < 1) 327 327 return -EINVAL; 328 328 329 - gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), 330 - GFP_KERNEL); 329 + gdev = kzalloc(struct_size(gdev, cdev, num_devices), GFP_KERNEL); 331 330 if (!gdev) 332 331 return -ENOMEM; 333 332
+2 -2
drivers/soc/actions/owl-sps.c
··· 117 117 118 118 sps_info = match->data; 119 119 120 - sps = devm_kzalloc(&pdev->dev, sizeof(*sps) + 121 - sps_info->num_domains * sizeof(sps->domains[0]), 120 + sps = devm_kzalloc(&pdev->dev, 121 + struct_size(sps, domains, sps_info->num_domains), 122 122 GFP_KERNEL); 123 123 if (!sps) 124 124 return -ENOMEM;
+1 -2
drivers/soc/rockchip/pm_domains.c
··· 626 626 pmu_info = match->data; 627 627 628 628 pmu = devm_kzalloc(dev, 629 - sizeof(*pmu) + 630 - pmu_info->num_domains * sizeof(pmu->domains[0]), 629 + struct_size(pmu, domains, pmu_info->num_domains), 631 630 GFP_KERNEL); 632 631 if (!pmu) 633 632 return -ENOMEM;
+2 -2
drivers/staging/greybus/module.c
··· 94 94 struct gb_module *module; 95 95 int i; 96 96 97 - module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf), 98 - GFP_KERNEL); 97 + module = kzalloc(struct_size(module, interfaces, num_interfaces), 98 + GFP_KERNEL); 99 99 if (!module) 100 100 return NULL; 101 101
+3 -3
drivers/thermal/qcom/tsens.c
··· 112 112 int ret, i; 113 113 struct device *dev; 114 114 struct device_node *np; 115 - struct tsens_sensor *s; 116 115 struct tsens_device *tmdev; 117 116 const struct tsens_data *data; 118 117 const struct of_device_id *id; ··· 134 135 return -EINVAL; 135 136 } 136 137 137 - tmdev = devm_kzalloc(dev, sizeof(*tmdev) + 138 - data->num_sensors * sizeof(*s), GFP_KERNEL); 138 + tmdev = devm_kzalloc(dev, 139 + struct_size(tmdev, sensor, data->num_sensors), 140 + GFP_KERNEL); 139 141 if (!tmdev) 140 142 return -ENOMEM; 141 143
+2 -3
drivers/usb/gadget/function/f_midi.c
··· 1301 1301 } 1302 1302 1303 1303 /* allocate and initialize one new instance */ 1304 - midi = kzalloc( 1305 - sizeof(*midi) + opts->in_ports * sizeof(*midi->in_ports_array), 1306 - GFP_KERNEL); 1304 + midi = kzalloc(struct_size(midi, in_ports_array, opts->in_ports), 1305 + GFP_KERNEL); 1307 1306 if (!midi) { 1308 1307 status = -ENOMEM; 1309 1308 goto setup_fail;
+1 -2
drivers/zorro/zorro.c
··· 138 138 int error; 139 139 140 140 /* Initialize the Zorro bus */ 141 - bus = kzalloc(sizeof(*bus) + 142 - zorro_num_autocon * sizeof(bus->devices[0]), 141 + bus = kzalloc(struct_size(bus, devices, zorro_num_autocon), 143 142 GFP_KERNEL); 144 143 if (!bus) 145 144 return -ENOMEM;
+1 -2
fs/afs/addr_list.c
··· 43 43 44 44 _enter("%u,%u,%u", nr, service, port); 45 45 46 - alist = kzalloc(sizeof(*alist) + sizeof(alist->addrs[0]) * nr, 47 - GFP_KERNEL); 46 + alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL); 48 47 if (!alist) 49 48 return NULL; 50 49
+14
include/linux/compiler-clang.h
··· 32 32 #ifdef __noretpoline 33 33 #undef __noretpoline 34 34 #endif 35 + 36 + /* 37 + * Not all versions of clang implement the type-generic versions 38 + * of the builtin overflow checkers. Fortunately, clang implements 39 + * __has_builtin allowing us to avoid awkward version 40 + * checks. Unfortunately, we don't know which version of gcc clang 41 + * pretends to be, so the macro may or may not be defined. 42 + */ 43 + #undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 44 + #if __has_builtin(__builtin_mul_overflow) && \ 45 + __has_builtin(__builtin_add_overflow) && \ 46 + __has_builtin(__builtin_sub_overflow) 47 + #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 48 + #endif
+4
include/linux/compiler-gcc.h
··· 343 343 * code 344 344 */ 345 345 #define uninitialized_var(x) x = x 346 + 347 + #if GCC_VERSION >= 50100 348 + #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 349 + #endif
+4
include/linux/compiler-intel.h
··· 44 44 #define __builtin_bswap16 _bswap16 45 45 #endif 46 46 47 + /* 48 + * icc defines __GNUC__, but does not implement the builtin overflow checkers. 49 + */ 50 + #undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+6 -2
include/linux/device.h
··· 25 25 #include <linux/ratelimit.h> 26 26 #include <linux/uidgid.h> 27 27 #include <linux/gfp.h> 28 + #include <linux/overflow.h> 28 29 #include <asm/device.h> 29 30 30 31 struct device; ··· 673 672 static inline void *devm_kmalloc_array(struct device *dev, 674 673 size_t n, size_t size, gfp_t flags) 675 674 { 676 - if (size != 0 && n > SIZE_MAX / size) 675 + size_t bytes; 676 + 677 + if (unlikely(check_mul_overflow(n, size, &bytes))) 677 678 return NULL; 678 - return devm_kmalloc(dev, n * size, flags); 679 + 680 + return devm_kmalloc(dev, bytes, flags); 679 681 } 680 682 static inline void *devm_kcalloc(struct device *dev, 681 683 size_t n, size_t size, gfp_t flags)
+5 -2
include/linux/mm.h
··· 25 25 #include <linux/err.h> 26 26 #include <linux/page_ref.h> 27 27 #include <linux/memremap.h> 28 + #include <linux/overflow.h> 28 29 29 30 struct mempolicy; 30 31 struct anon_vma; ··· 561 560 562 561 static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) 563 562 { 564 - if (size != 0 && n > SIZE_MAX / size) 563 + size_t bytes; 564 + 565 + if (unlikely(check_mul_overflow(n, size, &bytes))) 565 566 return NULL; 566 567 567 - return kvmalloc(n * size, flags); 568 + return kvmalloc(bytes, flags); 568 569 } 569 570 570 571 extern void kvfree(const void *addr);
+278
include/linux/overflow.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + #ifndef __LINUX_OVERFLOW_H 3 + #define __LINUX_OVERFLOW_H 4 + 5 + #include <linux/compiler.h> 6 + 7 + /* 8 + * In the fallback code below, we need to compute the minimum and 9 + * maximum values representable in a given type. These macros may also 10 + * be useful elsewhere, so we provide them outside the 11 + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. 12 + * 13 + * It would seem more obvious to do something like 14 + * 15 + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) 16 + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) 17 + * 18 + * Unfortunately, the middle expressions, strictly speaking, have 19 + * undefined behaviour, and at least some versions of gcc warn about 20 + * the type_max expression (but not if -fsanitize=undefined is in 21 + * effect; in that case, the warning is deferred to runtime...). 22 + * 23 + * The slightly excessive casting in type_min is to make sure the 24 + * macros also produce sensible values for the exotic type _Bool. [The 25 + * overflow checkers only almost work for _Bool, but that's 26 + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on 27 + * _Bools. Besides, the gcc builtins don't allow _Bool* as third 28 + * argument.] 29 + * 30 + * Idea stolen from 31 + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - 32 + * credit to Christian Biere. 
33 + */ 34 + #define is_signed_type(type) (((type)(-1)) < (type)1) 35 + #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) 36 + #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) 37 + #define type_min(T) ((T)((T)-type_max(T)-(T)1)) 38 + 39 + 40 + #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 41 + /* 42 + * For simplicity and code hygiene, the fallback code below insists on 43 + * a, b and *d having the same type (similar to the min() and max() 44 + * macros), whereas gcc's type-generic overflow checkers accept 45 + * different types. Hence we don't just make check_add_overflow an 46 + * alias for __builtin_add_overflow, but add type checks similar to 47 + * below. 48 + */ 49 + #define check_add_overflow(a, b, d) ({ \ 50 + typeof(a) __a = (a); \ 51 + typeof(b) __b = (b); \ 52 + typeof(d) __d = (d); \ 53 + (void) (&__a == &__b); \ 54 + (void) (&__a == __d); \ 55 + __builtin_add_overflow(__a, __b, __d); \ 56 + }) 57 + 58 + #define check_sub_overflow(a, b, d) ({ \ 59 + typeof(a) __a = (a); \ 60 + typeof(b) __b = (b); \ 61 + typeof(d) __d = (d); \ 62 + (void) (&__a == &__b); \ 63 + (void) (&__a == __d); \ 64 + __builtin_sub_overflow(__a, __b, __d); \ 65 + }) 66 + 67 + #define check_mul_overflow(a, b, d) ({ \ 68 + typeof(a) __a = (a); \ 69 + typeof(b) __b = (b); \ 70 + typeof(d) __d = (d); \ 71 + (void) (&__a == &__b); \ 72 + (void) (&__a == __d); \ 73 + __builtin_mul_overflow(__a, __b, __d); \ 74 + }) 75 + 76 + #else 77 + 78 + 79 + /* Checking for unsigned overflow is relatively easy without causing UB. 
*/ 80 + #define __unsigned_add_overflow(a, b, d) ({ \ 81 + typeof(a) __a = (a); \ 82 + typeof(b) __b = (b); \ 83 + typeof(d) __d = (d); \ 84 + (void) (&__a == &__b); \ 85 + (void) (&__a == __d); \ 86 + *__d = __a + __b; \ 87 + *__d < __a; \ 88 + }) 89 + #define __unsigned_sub_overflow(a, b, d) ({ \ 90 + typeof(a) __a = (a); \ 91 + typeof(b) __b = (b); \ 92 + typeof(d) __d = (d); \ 93 + (void) (&__a == &__b); \ 94 + (void) (&__a == __d); \ 95 + *__d = __a - __b; \ 96 + __a < __b; \ 97 + }) 98 + /* 99 + * If one of a or b is a compile-time constant, this avoids a division. 100 + */ 101 + #define __unsigned_mul_overflow(a, b, d) ({ \ 102 + typeof(a) __a = (a); \ 103 + typeof(b) __b = (b); \ 104 + typeof(d) __d = (d); \ 105 + (void) (&__a == &__b); \ 106 + (void) (&__a == __d); \ 107 + *__d = __a * __b; \ 108 + __builtin_constant_p(__b) ? \ 109 + __b > 0 && __a > type_max(typeof(__a)) / __b : \ 110 + __a > 0 && __b > type_max(typeof(__b)) / __a; \ 111 + }) 112 + 113 + /* 114 + * For signed types, detecting overflow is much harder, especially if 115 + * we want to avoid UB. But the interface of these macros is such that 116 + * we must provide a result in *d, and in fact we must produce the 117 + * result promised by gcc's builtins, which is simply the possibly 118 + * wrapped-around value. Fortunately, we can just formally do the 119 + * operations in the widest relevant unsigned type (u64) and then 120 + * truncate the result - gcc is smart enough to generate the same code 121 + * with and without the (u64) casts. 122 + */ 123 + 124 + /* 125 + * Adding two signed integers can overflow only if they have the same 126 + * sign, and overflow has happened iff the result has the opposite 127 + * sign. 
128 + */ 129 + #define __signed_add_overflow(a, b, d) ({ \ 130 + typeof(a) __a = (a); \ 131 + typeof(b) __b = (b); \ 132 + typeof(d) __d = (d); \ 133 + (void) (&__a == &__b); \ 134 + (void) (&__a == __d); \ 135 + *__d = (u64)__a + (u64)__b; \ 136 + (((~(__a ^ __b)) & (*__d ^ __a)) \ 137 + & type_min(typeof(__a))) != 0; \ 138 + }) 139 + 140 + /* 141 + * Subtraction is similar, except that overflow can now happen only 142 + * when the signs are opposite. In this case, overflow has happened if 143 + * the result has the opposite sign of a. 144 + */ 145 + #define __signed_sub_overflow(a, b, d) ({ \ 146 + typeof(a) __a = (a); \ 147 + typeof(b) __b = (b); \ 148 + typeof(d) __d = (d); \ 149 + (void) (&__a == &__b); \ 150 + (void) (&__a == __d); \ 151 + *__d = (u64)__a - (u64)__b; \ 152 + ((((__a ^ __b)) & (*__d ^ __a)) \ 153 + & type_min(typeof(__a))) != 0; \ 154 + }) 155 + 156 + /* 157 + * Signed multiplication is rather hard. gcc always follows C99, so 158 + * division is truncated towards 0. This means that we can write the 159 + * overflow check like this: 160 + * 161 + * (a > 0 && (b > MAX/a || b < MIN/a)) || 162 + * (a < -1 && (b > MIN/a || b < MAX/a)) || 163 + * (a == -1 && b == MIN) 164 + * 165 + * The redundant casts of -1 are to silence an annoying -Wtype-limits 166 + * (included in -Wextra) warning: When the type is u8 or u16, the 167 + * __b_c_e in check_mul_overflow obviously selects 168 + * __unsigned_mul_overflow, but unfortunately gcc still parses this 169 + * code and warns about the limited range of __b. 
170 + */ 171 + 172 + #define __signed_mul_overflow(a, b, d) ({ \ 173 + typeof(a) __a = (a); \ 174 + typeof(b) __b = (b); \ 175 + typeof(d) __d = (d); \ 176 + typeof(a) __tmax = type_max(typeof(a)); \ 177 + typeof(a) __tmin = type_min(typeof(a)); \ 178 + (void) (&__a == &__b); \ 179 + (void) (&__a == __d); \ 180 + *__d = (u64)__a * (u64)__b; \ 181 + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ 182 + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ 183 + (__b == (typeof(__b))-1 && __a == __tmin); \ 184 + }) 185 + 186 + 187 + #define check_add_overflow(a, b, d) \ 188 + __builtin_choose_expr(is_signed_type(typeof(a)), \ 189 + __signed_add_overflow(a, b, d), \ 190 + __unsigned_add_overflow(a, b, d)) 191 + 192 + #define check_sub_overflow(a, b, d) \ 193 + __builtin_choose_expr(is_signed_type(typeof(a)), \ 194 + __signed_sub_overflow(a, b, d), \ 195 + __unsigned_sub_overflow(a, b, d)) 196 + 197 + #define check_mul_overflow(a, b, d) \ 198 + __builtin_choose_expr(is_signed_type(typeof(a)), \ 199 + __signed_mul_overflow(a, b, d), \ 200 + __unsigned_mul_overflow(a, b, d)) 201 + 202 + 203 + #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ 204 + 205 + /** 206 + * array_size() - Calculate size of 2-dimensional array. 207 + * 208 + * @a: dimension one 209 + * @b: dimension two 210 + * 211 + * Calculates size of 2-dimensional array: @a * @b. 212 + * 213 + * Returns: number of bytes needed to represent the array or SIZE_MAX on 214 + * overflow. 215 + */ 216 + static inline __must_check size_t array_size(size_t a, size_t b) 217 + { 218 + size_t bytes; 219 + 220 + if (check_mul_overflow(a, b, &bytes)) 221 + return SIZE_MAX; 222 + 223 + return bytes; 224 + } 225 + 226 + /** 227 + * array3_size() - Calculate size of 3-dimensional array. 228 + * 229 + * @a: dimension one 230 + * @b: dimension two 231 + * @c: dimension three 232 + * 233 + * Calculates size of 3-dimensional array: @a * @b * @c. 
234 + * 235 + * Returns: number of bytes needed to represent the array or SIZE_MAX on 236 + * overflow. 237 + */ 238 + static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) 239 + { 240 + size_t bytes; 241 + 242 + if (check_mul_overflow(a, b, &bytes)) 243 + return SIZE_MAX; 244 + if (check_mul_overflow(bytes, c, &bytes)) 245 + return SIZE_MAX; 246 + 247 + return bytes; 248 + } 249 + 250 + static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) 251 + { 252 + size_t bytes; 253 + 254 + if (check_mul_overflow(n, size, &bytes)) 255 + return SIZE_MAX; 256 + if (check_add_overflow(bytes, c, &bytes)) 257 + return SIZE_MAX; 258 + 259 + return bytes; 260 + } 261 + 262 + /** 263 + * struct_size() - Calculate size of structure with trailing array. 264 + * @p: Pointer to the structure. 265 + * @member: Name of the array member. 266 + * @n: Number of elements in the array. 267 + * 268 + * Calculates size of memory needed for structure @p followed by an 269 + * array of @n @member elements. 270 + * 271 + * Return: number of bytes needed or SIZE_MAX on overflow. 272 + */ 273 + #define struct_size(p, member, n) \ 274 + __ab_c_size(n, \ 275 + sizeof(*(p)->member) + __must_be_array((p)->member),\ 276 + sizeof(*(p))) 277 + 278 + #endif /* __LINUX_OVERFLOW_H */
+11 -6
include/linux/slab.h
··· 13 13 #define _LINUX_SLAB_H 14 14 15 15 #include <linux/gfp.h> 16 + #include <linux/overflow.h> 16 17 #include <linux/types.h> 17 18 #include <linux/workqueue.h> 18 19 ··· 625 624 */ 626 625 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) 627 626 { 628 - if (size != 0 && n > SIZE_MAX / size) 627 + size_t bytes; 628 + 629 + if (unlikely(check_mul_overflow(n, size, &bytes))) 629 630 return NULL; 630 631 if (__builtin_constant_p(n) && __builtin_constant_p(size)) 631 - return kmalloc(n * size, flags); 632 - return __kmalloc(n * size, flags); 632 + return kmalloc(bytes, flags); 633 + return __kmalloc(bytes, flags); 633 634 } 634 635 635 636 /** ··· 660 657 static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, 661 658 int node) 662 659 { 663 - if (size != 0 && n > SIZE_MAX / size) 660 + size_t bytes; 661 + 662 + if (unlikely(check_mul_overflow(n, size, &bytes))) 664 663 return NULL; 665 664 if (__builtin_constant_p(n) && __builtin_constant_p(size)) 666 - return kmalloc_node(n * size, flags, node); 667 - return __kmalloc_node(n * size, flags, node); 665 + return kmalloc_node(bytes, flags, node); 666 + return __kmalloc_node(bytes, flags, node); 668 667 } 669 668 670 669 static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+1
include/linux/vmalloc.h
··· 8 8 #include <linux/llist.h> 9 9 #include <asm/page.h> /* pgprot_t */ 10 10 #include <linux/rbtree.h> 11 + #include <linux/overflow.h> 11 12 12 13 struct vm_area_struct; /* vma defining user mapping in mm_types.h */ 13 14 struct notifier_block; /* in notifier.h */
+2 -2
kernel/cgroup/cgroup.c
··· 4820 4820 int ret; 4821 4821 4822 4822 /* allocate the cgroup and its ID, 0 is reserved for the root */ 4823 - cgrp = kzalloc(sizeof(*cgrp) + 4824 - sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL); 4823 + cgrp = kzalloc(struct_size(cgrp, ancestor_ids, (level + 1)), 4824 + GFP_KERNEL); 4825 4825 if (!cgrp) 4826 4826 return ERR_PTR(-ENOMEM); 4827 4827
+1 -2
kernel/module.c
··· 1604 1604 if (notes == 0) 1605 1605 return; 1606 1606 1607 - notes_attrs = kzalloc(sizeof(*notes_attrs) 1608 - + notes * sizeof(notes_attrs->attrs[0]), 1607 + notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes), 1609 1608 GFP_KERNEL); 1610 1609 if (notes_attrs == NULL) 1611 1610 return;
+1 -2
kernel/workqueue.c
··· 3714 3714 3715 3715 lockdep_assert_held(&wq_pool_mutex); 3716 3716 3717 - ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]), 3718 - GFP_KERNEL); 3717 + ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL); 3719 3718 3720 3719 new_attrs = alloc_workqueue_attrs(GFP_KERNEL); 3721 3720 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+3
lib/Kconfig.debug
··· 1802 1802 config TEST_UUID 1803 1803 tristate "Test functions located in the uuid module at runtime" 1804 1804 1805 + config TEST_OVERFLOW 1806 + tristate "Test check_*_overflow() functions at runtime" 1807 + 1805 1808 config TEST_RHASHTABLE 1806 1809 tristate "Perform selftest on resizable hash table" 1807 1810 default n
+1
lib/Makefile
··· 60 60 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 61 61 obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o 62 62 obj-$(CONFIG_TEST_LKM) += test_module.o 63 + obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o 63 64 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o 64 65 obj-$(CONFIG_TEST_SORT) += test_sort.o 65 66 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+417
lib/test_overflow.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + /* 3 + * Test cases for arithmetic overflow checks. 4 + */ 5 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 + 7 + #include <linux/device.h> 8 + #include <linux/init.h> 9 + #include <linux/kernel.h> 10 + #include <linux/mm.h> 11 + #include <linux/module.h> 12 + #include <linux/overflow.h> 13 + #include <linux/slab.h> 14 + #include <linux/types.h> 15 + #include <linux/vmalloc.h> 16 + 17 + #define DEFINE_TEST_ARRAY(t) \ 18 + static const struct test_ ## t { \ 19 + t a, b; \ 20 + t sum, diff, prod; \ 21 + bool s_of, d_of, p_of; \ 22 + } t ## _tests[] __initconst 23 + 24 + DEFINE_TEST_ARRAY(u8) = { 25 + {0, 0, 0, 0, 0, false, false, false}, 26 + {1, 1, 2, 0, 1, false, false, false}, 27 + {0, 1, 1, U8_MAX, 0, false, true, false}, 28 + {1, 0, 1, 1, 0, false, false, false}, 29 + {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, 30 + {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, 31 + {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, 32 + {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, 33 + {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, 34 + 35 + {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, 36 + {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, 37 + 38 + {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, 39 + {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, 40 + {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, 41 + {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, 42 + 43 + {48, 32, 80, 16, 0, false, false, true}, 44 + {128, 128, 0, 0, 0, true, false, true}, 45 + {123, 234, 101, 145, 110, true, true, true}, 46 + }; 47 + DEFINE_TEST_ARRAY(u16) = { 48 + {0, 0, 0, 0, 0, false, false, false}, 49 + {1, 1, 2, 0, 1, false, false, false}, 50 + {0, 1, 1, U16_MAX, 0, false, true, false}, 51 + {1, 0, 1, 1, 0, false, false, false}, 52 + {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, 53 + {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, 54 + {1, U16_MAX, 
0, 2, U16_MAX, true, true, false}, 55 + {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, 56 + {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, 57 + 58 + {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, 59 + {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, 60 + 61 + {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, 62 + {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, 63 + {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, 64 + {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, 65 + 66 + {123, 234, 357, 65425, 28782, false, true, false}, 67 + {1234, 2345, 3579, 64425, 10146, false, true, true}, 68 + }; 69 + DEFINE_TEST_ARRAY(u32) = { 70 + {0, 0, 0, 0, 0, false, false, false}, 71 + {1, 1, 2, 0, 1, false, false, false}, 72 + {0, 1, 1, U32_MAX, 0, false, true, false}, 73 + {1, 0, 1, 1, 0, false, false, false}, 74 + {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, 75 + {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false}, 76 + {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, 77 + {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, 78 + {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, 79 + 80 + {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, 81 + {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, 82 + 83 + {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, 84 + {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, 85 + {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, 86 + {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, 87 + 88 + {-2U, 1U, -1U, -3U, -2U, false, false, false}, 89 + {-4U, 5U, 1U, -9U, -20U, true, false, true}, 90 + }; 91 + 92 + DEFINE_TEST_ARRAY(u64) = { 93 + {0, 0, 0, 0, 0, false, false, false}, 94 + {1, 1, 2, 0, 1, false, false, false}, 95 + {0, 1, 1, U64_MAX, 0, false, true, false}, 96 + {1, 0, 1, 1, 0, false, false, false}, 97 + {0, U64_MAX, U64_MAX, 1, 0, false, true, false}, 98 + {U64_MAX, 0, 
U64_MAX, U64_MAX, 0, false, false, false}, 99 + {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, 100 + {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, 101 + {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, 102 + 103 + {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, 104 + {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, 105 + 106 + {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, 107 + {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, 108 + {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, 109 + {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, 110 + {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, 111 + 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, 112 + false, true, false}, 113 + {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, 114 + }; 115 + 116 + DEFINE_TEST_ARRAY(s8) = { 117 + {0, 0, 0, 0, 0, false, false, false}, 118 + 119 + {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, 120 + {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, 121 + {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, 122 + {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, 123 + 124 + {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, 125 + {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, 126 + {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, 127 + {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, 128 + {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, 129 + {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, 130 + 131 + {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, 132 + {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, 133 + {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, 134 + {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, 135 + 136 + {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, 137 + {S8_MAX, S8_MAX, -2, 0, 1, true, 
false, true}, 138 + 139 + {-4, -32, -36, 28, -128, false, false, true}, 140 + {-4, 32, 28, -36, -128, false, false, false}, 141 + }; 142 + 143 + DEFINE_TEST_ARRAY(s16) = { 144 + {0, 0, 0, 0, 0, false, false, false}, 145 + 146 + {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, 147 + {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, 148 + {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, 149 + {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, 150 + 151 + {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, 152 + {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, 153 + {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, 154 + {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, 155 + {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, 156 + {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, 157 + 158 + {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, 159 + {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false}, 160 + {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, 161 + {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, 162 + 163 + {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, 164 + {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, 165 + }; 166 + DEFINE_TEST_ARRAY(s32) = { 167 + {0, 0, 0, 0, 0, false, false, false}, 168 + 169 + {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, 170 + {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false}, 171 + {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, 172 + {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, 173 + 174 + {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, 175 + {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, 176 + {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false}, 177 + {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, 178 + {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, 179 + 
{-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, 180 + 181 + {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, 182 + {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, 183 + {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, 184 + {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, 185 + 186 + {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, 187 + {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, 188 + }; 189 + DEFINE_TEST_ARRAY(s64) = { 190 + {0, 0, 0, 0, 0, false, false, false}, 191 + 192 + {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, 193 + {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, 194 + {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, 195 + {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, 196 + 197 + {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, 198 + {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, 199 + {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, 200 + {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, 201 + {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, 202 + {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, 203 + 204 + {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, 205 + {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, 206 + {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, 207 + {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, 208 + 209 + {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, 210 + {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, 211 + 212 + {-1, -1, -2, 0, 1, false, false, false}, 213 + {-1, -128, -129, 127, 128, false, false, false}, 214 + {-128, -1, -129, -127, 128, false, false, false}, 215 + {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, 216 + }; 217 + 218 + #define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ 219 + t _r; \ 220 + bool _of; \ 221 + \ 222 + _of = check_ ## op 
## _overflow(a, b, &_r); \ 223 + if (_of != of) { \ 224 + pr_warn("expected "fmt" "sym" "fmt \ 225 + " to%s overflow (type %s)\n", \ 226 + a, b, of ? "" : " not", #t); \ 227 + err = 1; \ 228 + } \ 229 + if (_r != r) { \ 230 + pr_warn("expected "fmt" "sym" "fmt" == " \ 231 + fmt", got "fmt" (type %s)\n", \ 232 + a, b, r, _r, #t); \ 233 + err = 1; \ 234 + } \ 235 + } while (0) 236 + 237 + #define DEFINE_TEST_FUNC(t, fmt) \ 238 + static int __init do_test_ ## t(const struct test_ ## t *p) \ 239 + { \ 240 + int err = 0; \ 241 + \ 242 + check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ 243 + check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ 244 + check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ 245 + check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ 246 + check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ 247 + \ 248 + return err; \ 249 + } \ 250 + \ 251 + static int __init test_ ## t ## _overflow(void) { \ 252 + int err = 0; \ 253 + unsigned i; \ 254 + \ 255 + pr_info("%-3s: %zu tests\n", #t, ARRAY_SIZE(t ## _tests)); \ 256 + for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ 257 + err |= do_test_ ## t(&t ## _tests[i]); \ 258 + return err; \ 259 + } 260 + 261 + DEFINE_TEST_FUNC(u8, "%d"); 262 + DEFINE_TEST_FUNC(s8, "%d"); 263 + DEFINE_TEST_FUNC(u16, "%d"); 264 + DEFINE_TEST_FUNC(s16, "%d"); 265 + DEFINE_TEST_FUNC(u32, "%u"); 266 + DEFINE_TEST_FUNC(s32, "%d"); 267 + #if BITS_PER_LONG == 64 268 + DEFINE_TEST_FUNC(u64, "%llu"); 269 + DEFINE_TEST_FUNC(s64, "%lld"); 270 + #endif 271 + 272 + static int __init test_overflow_calculation(void) 273 + { 274 + int err = 0; 275 + 276 + err |= test_u8_overflow(); 277 + err |= test_s8_overflow(); 278 + err |= test_u16_overflow(); 279 + err |= test_s16_overflow(); 280 + err |= test_u32_overflow(); 281 + err |= test_s32_overflow(); 282 + #if BITS_PER_LONG == 64 283 + err |= test_u64_overflow(); 284 + err |= test_s64_overflow(); 285 + #endif 286 + 287 + return err; 288 + } 
289 + 290 + /* 291 + * Deal with the various forms of allocator arguments. See comments above 292 + * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". 293 + */ 294 + #define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL) 295 + #define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE) 296 + #define alloc000(alloc, arg, sz) alloc(sz) 297 + #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) 298 + #define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL) 299 + #define free0(free, arg, ptr) free(ptr) 300 + #define free1(free, arg, ptr) free(arg, ptr) 301 + 302 + /* Wrap around to 8K */ 303 + #define TEST_SIZE (9 << PAGE_SHIFT) 304 + 305 + #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ 306 + static int __init test_ ## func (void *arg) \ 307 + { \ 308 + volatile size_t a = TEST_SIZE; \ 309 + volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ 310 + void *ptr; \ 311 + \ 312 + /* Tiny allocation test. */ \ 313 + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ 314 + if (!ptr) { \ 315 + pr_warn(#func " failed regular allocation?!\n"); \ 316 + return 1; \ 317 + } \ 318 + free ## want_arg (free_func, arg, ptr); \ 319 + \ 320 + /* Wrapped allocation test. */ \ 321 + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ 322 + a * b); \ 323 + if (!ptr) { \ 324 + pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \ 325 + return 1; \ 326 + } \ 327 + free ## want_arg (free_func, arg, ptr); \ 328 + \ 329 + /* Saturated allocation test. */ \ 330 + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ 331 + array_size(a, b)); \ 332 + if (ptr) { \ 333 + pr_warn(#func " missed saturation!\n"); \ 334 + free ## want_arg (free_func, arg, ptr); \ 335 + return 1; \ 336 + } \ 337 + pr_info(#func " detected saturation\n"); \ 338 + return 0; \ 339 + } 340 + 341 + /* 342 + * Allocator uses a trailing node argument --------+ (e.g. 
kmalloc_node()) 343 + * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc()) 344 + * Allocator uses a special leading argument + | | (e.g. devm_kmalloc()) 345 + * | | | 346 + */ 347 + DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); 348 + DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); 349 + DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); 350 + DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); 351 + DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0); 352 + DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1); 353 + DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0); 354 + DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1); 355 + DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); 356 + DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); 357 + DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); 358 + DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); 359 + DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); 360 + DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); 361 + 362 + static int __init test_overflow_allocation(void) 363 + { 364 + const char device_name[] = "overflow-test"; 365 + struct device *dev; 366 + int err = 0; 367 + 368 + /* Create dummy device for devm_kmalloc()-family tests. 
*/ 369 + dev = root_device_register(device_name); 370 + if (!dev) { 371 + pr_warn("Cannot register test device\n"); 372 + return 1; 373 + } 374 + 375 + err |= test_kmalloc(NULL); 376 + err |= test_kmalloc_node(NULL); 377 + err |= test_kzalloc(NULL); 378 + err |= test_kzalloc_node(NULL); 379 + err |= test_kvmalloc(NULL); 380 + err |= test_kvmalloc_node(NULL); 381 + err |= test_kvzalloc(NULL); 382 + err |= test_kvzalloc_node(NULL); 383 + err |= test_vmalloc(NULL); 384 + err |= test_vmalloc_node(NULL); 385 + err |= test_vzalloc(NULL); 386 + err |= test_vzalloc_node(NULL); 387 + err |= test_devm_kmalloc(dev); 388 + err |= test_devm_kzalloc(dev); 389 + 390 + device_unregister(dev); 391 + 392 + return err; 393 + } 394 + 395 + static int __init test_module_init(void) 396 + { 397 + int err = 0; 398 + 399 + err |= test_overflow_calculation(); 400 + err |= test_overflow_allocation(); 401 + 402 + if (err) { 403 + pr_warn("FAIL!\n"); 404 + err = -EINVAL; 405 + } else { 406 + pr_info("all tests passed\n"); 407 + } 408 + 409 + return err; 410 + } 411 + 412 + static void __exit test_module_exit(void) 413 + { } 414 + 415 + module_init(test_module_init); 416 + module_exit(test_module_exit); 417 + MODULE_LICENSE("Dual MIT/GPL");
+2 -3
net/ceph/mon_client.c
··· 62 62 63 63 if (num_mon > CEPH_MAX_MON) 64 64 goto bad; 65 - m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS); 65 + m = kmalloc(struct_size(m, mon_inst, num_mon), GFP_NOFS); 66 66 if (m == NULL) 67 67 return ERR_PTR(-ENOMEM); 68 68 m->fsid = fsid; ··· 1000 1000 int i; 1001 1001 1002 1002 /* build initial monmap */ 1003 - monc->monmap = kzalloc(sizeof(*monc->monmap) + 1004 - num_mon*sizeof(monc->monmap->mon_inst[0]), 1003 + monc->monmap = kzalloc(struct_size(monc->monmap, mon_inst, num_mon), 1005 1004 GFP_KERNEL); 1006 1005 if (!monc->monmap) 1007 1006 return -ENOMEM;
+1 -2
net/ceph/osd_client.c
··· 584 584 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags); 585 585 } else { 586 586 BUG_ON(num_ops > CEPH_OSD_MAX_OPS); 587 - req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]), 588 - gfp_flags); 587 + req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags); 589 588 } 590 589 if (unlikely(!req)) 591 590 return NULL;
+1 -2
net/netfilter/xt_recent.c
··· 184 184 } 185 185 186 186 nstamps_max += 1; 187 - e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max, 188 - GFP_ATOMIC); 187 + e = kmalloc(struct_size(e, stamps, nstamps_max), GFP_ATOMIC); 189 188 if (e == NULL) 190 189 return NULL; 191 190 memcpy(&e->addr, addr, sizeof(e->addr));
+2 -2
net/sctp/endpointola.c
··· 73 73 * variables. There are arrays that we encode directly 74 74 * into parameters to make the rest of the operations easier. 75 75 */ 76 - auth_hmacs = kzalloc(sizeof(*auth_hmacs) + 77 - sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); 76 + auth_hmacs = kzalloc(struct_size(auth_hmacs, hmac_ids, 77 + SCTP_AUTH_NUM_HMACS), gfp); 78 78 if (!auth_hmacs) 79 79 goto nomem; 80 80
+2 -2
sound/core/vmaster.c
··· 259 259 struct link_master *master_link = snd_kcontrol_chip(master); 260 260 struct link_slave *srec; 261 261 262 - srec = kzalloc(sizeof(*srec) + 263 - slave->count * sizeof(*slave->vd), GFP_KERNEL); 262 + srec = kzalloc(struct_size(srec, slave.vd, slave->count), 263 + GFP_KERNEL); 264 264 if (!srec) 265 265 return -ENOMEM; 266 266 srec->kctl = slave;
+2 -1
sound/soc/qcom/apq8016_sbc.c
··· 147 147 num_links = of_get_child_count(node); 148 148 149 149 /* Allocate the private data and the DAI link array */ 150 - data = devm_kzalloc(dev, sizeof(*data) + sizeof(*link) * num_links, 150 + data = devm_kzalloc(dev, 151 + struct_size(data, dai_link, num_links), 151 152 GFP_KERNEL); 152 153 if (!data) 153 154 return ERR_PTR(-ENOMEM);
+1 -1
sound/soc/soc-dapm.c
··· 1086 1086 list_for_each(it, widgets) 1087 1087 size++; 1088 1088 1089 - *list = kzalloc(sizeof(**list) + size * sizeof(*w), GFP_KERNEL); 1089 + *list = kzalloc(struct_size(*list, widgets, size), GFP_KERNEL); 1090 1090 if (*list == NULL) 1091 1091 return -ENOMEM; 1092 1092