Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'bitmap-for-5.19-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

- bitmap: optimize bitmap_weight() usage, from me

- lib/bitmap.c make bitmap_print_bitmask_to_buf parseable, from Mauro
Carvalho Chehab

- include/linux/find: Fix documentation, from Anna-Maria Behnsen

- bitmap: fix conversion from/to fix-sized arrays, from me

- bitmap: Fix return values to be unsigned, from Kees Cook

It has been in linux-next for at least a week with no problems.

* tag 'bitmap-for-5.19-rc1' of https://github.com/norov/linux: (31 commits)
nodemask: Fix return values to be unsigned
bitmap: Fix return values to be unsigned
KVM: x86: hyper-v: replace bitmap_weight() with hweight64()
KVM: x86: hyper-v: fix type of valid_bank_mask
ia64: cleanup remove_siblinginfo()
drm/amd/pm: use bitmap_{from,to}_arr32 where appropriate
KVM: s390: replace bitmap_copy with bitmap_{from,to}_arr64 where appropriate
lib/bitmap: add test for bitmap_{from,to}_arr64
lib: add bitmap_{from,to}_arr64
lib/bitmap: extend comment for bitmap_(from,to)_arr32()
include/linux/find: Fix documentation
lib/bitmap.c make bitmap_print_bitmask_to_buf parseable
MAINTAINERS: add cpumask and nodemask files to BITMAP_API
arch/x86: replace nodes_weight with nodes_empty where appropriate
mm/vmstat: replace cpumask_weight with cpumask_empty where appropriate
clocksource: replace cpumask_weight with cpumask_empty in clocksource.c
genirq/affinity: replace cpumask_weight with cpumask_empty where appropriate
irq: mips: replace cpumask_weight with cpumask_empty where appropriate
drm/i915/pmu: replace cpumask_weight with cpumask_empty where appropriate
arch/x86: replace cpumask_weight with cpumask_empty where appropriate
...

+262 -206
+4
MAINTAINERS
··· 3533 3533 R: Rasmus Villemoes <linux@rasmusvillemoes.dk> 3534 3534 S: Maintained 3535 3535 F: include/linux/bitmap.h 3536 + F: include/linux/cpumask.h 3536 3537 F: include/linux/find.h 3538 + F: include/linux/nodemask.h 3537 3539 F: lib/bitmap.c 3540 + F: lib/cpumask.c 3538 3541 F: lib/find_bit.c 3539 3542 F: lib/find_bit_benchmark.c 3543 + F: lib/nodemask.c 3540 3544 F: lib/test_bitmap.c 3541 3545 F: tools/include/linux/bitmap.h 3542 3546 F: tools/include/linux/find.h
+1 -1
arch/alpha/kernel/process.c
··· 125 125 /* Wait for the secondaries to halt. */ 126 126 set_cpu_present(boot_cpuid, false); 127 127 set_cpu_possible(boot_cpuid, false); 128 - while (cpumask_weight(cpu_present_mask)) 128 + while (!cpumask_empty(cpu_present_mask)) 129 129 barrier(); 130 130 #endif 131 131
+1 -1
arch/ia64/kernel/setup.c
··· 572 572 #ifdef CONFIG_ACPI_HOTPLUG_CPU 573 573 prefill_possible_map(); 574 574 #endif 575 - per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ? 575 + per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ? 576 576 32 : cpumask_weight(&early_cpu_possible_map)), 577 577 additional_cpus > 0 ? additional_cpus : 0); 578 578 #endif /* CONFIG_ACPI_NUMA */
-4
arch/ia64/kernel/smpboot.c
··· 576 576 static void 577 577 remove_siblinginfo(int cpu) 578 578 { 579 - int last = 0; 580 - 581 579 if (cpu_data(cpu)->threads_per_core == 1 && 582 580 cpu_data(cpu)->cores_per_socket == 1) { 583 581 cpumask_clear_cpu(cpu, &cpu_core_map[cpu]); 584 582 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); 585 583 return; 586 584 } 587 - 588 - last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0); 589 585 590 586 /* remove it from all sibling map's */ 591 587 clear_cpu_sibling_map(cpu);
+3 -4
arch/riscv/kernel/cpufeature.c
··· 213 213 else 214 214 elf_hwcap = this_hwcap; 215 215 216 - if (bitmap_weight(riscv_isa, RISCV_ISA_EXT_MAX)) 217 - bitmap_and(riscv_isa, riscv_isa, this_isa, RISCV_ISA_EXT_MAX); 218 - else 216 + if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) 219 217 bitmap_copy(riscv_isa, this_isa, RISCV_ISA_EXT_MAX); 220 - 218 + else 219 + bitmap_and(riscv_isa, riscv_isa, this_isa, RISCV_ISA_EXT_MAX); 221 220 } 222 221 223 222 /* We don't support systems with F but without D, so mask those out
+3 -7
arch/s390/kvm/kvm-s390.c
··· 1332 1332 mutex_unlock(&kvm->lock); 1333 1333 return -EBUSY; 1334 1334 } 1335 - bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, 1336 - KVM_S390_VM_CPU_FEAT_NR_BITS); 1335 + bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); 1337 1336 mutex_unlock(&kvm->lock); 1338 1337 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", 1339 1338 data.feat[0], ··· 1503 1504 { 1504 1505 struct kvm_s390_vm_cpu_feat data; 1505 1506 1506 - bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, 1507 - KVM_S390_VM_CPU_FEAT_NR_BITS); 1507 + bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); 1508 1508 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) 1509 1509 return -EFAULT; 1510 1510 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", ··· 1518 1520 { 1519 1521 struct kvm_s390_vm_cpu_feat data; 1520 1522 1521 - bitmap_copy((unsigned long *) data.feat, 1522 - kvm_s390_available_cpu_feat, 1523 - KVM_S390_VM_CPU_FEAT_NR_BITS); 1523 + bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); 1524 1524 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) 1525 1525 return -EFAULT; 1526 1526 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+6 -6
arch/x86/kvm/hyperv.c
··· 90 90 { 91 91 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); 92 92 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); 93 - int auto_eoi_old, auto_eoi_new; 93 + bool auto_eoi_old, auto_eoi_new; 94 94 95 95 if (vector < HV_SYNIC_FIRST_VALID_VECTOR) 96 96 return; ··· 100 100 else 101 101 __clear_bit(vector, synic->vec_bitmap); 102 102 103 - auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256); 103 + auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256); 104 104 105 105 if (synic_has_vector_auto_eoi(synic, vector)) 106 106 __set_bit(vector, synic->auto_eoi_bitmap); 107 107 else 108 108 __clear_bit(vector, synic->auto_eoi_bitmap); 109 109 110 - auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256); 110 + auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256); 111 111 112 - if (!!auto_eoi_old == !!auto_eoi_new) 112 + if (auto_eoi_old == auto_eoi_new) 113 113 return; 114 114 115 115 if (!enable_apicv) ··· 1855 1855 all_cpus = flush_ex.hv_vp_set.format != 1856 1856 HV_GENERIC_SET_SPARSE_4K; 1857 1857 1858 - if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64)) 1858 + if (hc->var_cnt != hweight64(valid_bank_mask)) 1859 1859 return HV_STATUS_INVALID_HYPERCALL_INPUT; 1860 1860 1861 1861 if (all_cpus) ··· 1956 1956 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; 1957 1957 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; 1958 1958 1959 - if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64)) 1959 + if (hc->var_cnt != hweight64(valid_bank_mask)) 1960 1960 return HV_STATUS_INVALID_HYPERCALL_INPUT; 1961 1961 1962 1962 if (all_cpus)
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 781 781 goto failed; 782 782 } 783 783 784 - bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); 784 + bitmap_to_arr32(feature_mask, feature->allowed, 64); 785 785 786 786 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 787 787 feature_mask[1], NULL);
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 837 837 feature->feature_num < 64) 838 838 return -EINVAL; 839 839 840 - bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); 840 + bitmap_to_arr32(feature_mask, feature->allowed, 64); 841 841 842 842 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 843 843 feature_mask[1], NULL);
+1 -1
drivers/gpu/drm/i915/i915_pmu.c
··· 1047 1047 GEM_BUG_ON(!pmu->base.event_init); 1048 1048 1049 1049 /* Select the first online CPU as a designated reader. */ 1050 - if (!cpumask_weight(&i915_pmu_cpumask)) 1050 + if (cpumask_empty(&i915_pmu_cpumask)) 1051 1051 cpumask_set_cpu(cpu, &i915_pmu_cpumask); 1052 1052 1053 1053 return 0;
+19 -29
drivers/iio/dummy/iio_simple_dummy_buffer.c
··· 45 45 { 46 46 struct iio_poll_func *pf = p; 47 47 struct iio_dev *indio_dev = pf->indio_dev; 48 + int i = 0, j; 48 49 u16 *data; 49 50 50 51 data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); 51 52 if (!data) 52 53 goto done; 53 54 54 - if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) { 55 - /* 56 - * Three common options here: 57 - * hardware scans: certain combinations of channels make 58 - * up a fast read. The capture will consist of all of them. 59 - * Hence we just call the grab data function and fill the 60 - * buffer without processing. 61 - * software scans: can be considered to be random access 62 - * so efficient reading is just a case of minimal bus 63 - * transactions. 64 - * software culled hardware scans: 65 - * occasionally a driver may process the nearest hardware 66 - * scan to avoid storing elements that are not desired. This 67 - * is the fiddliest option by far. 68 - * Here let's pretend we have random access. And the values are 69 - * in the constant table fakedata. 70 - */ 71 - int i, j; 72 - 73 - for (i = 0, j = 0; 74 - i < bitmap_weight(indio_dev->active_scan_mask, 75 - indio_dev->masklength); 76 - i++, j++) { 77 - j = find_next_bit(indio_dev->active_scan_mask, 78 - indio_dev->masklength, j); 79 - /* random access read from the 'device' */ 80 - data[i] = fakedata[j]; 81 - } 82 - } 55 + /* 56 + * Three common options here: 57 + * hardware scans: 58 + * certain combinations of channels make up a fast read. The capture 59 + * will consist of all of them. Hence we just call the grab data 60 + * function and fill the buffer without processing. 61 + * software scans: 62 + * can be considered to be random access so efficient reading is just 63 + * a case of minimal bus transactions. 64 + * software culled hardware scans: 65 + * occasionally a driver may process the nearest hardware scan to avoid 66 + * storing elements that are not desired. This is the fiddliest option 67 + * by far. 
68 + * Here let's pretend we have random access. And the values are in the 69 + * constant table fakedata. 70 + */ 71 + for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength) 72 + data[i++] = fakedata[j]; 83 73 84 74 iio_push_to_buffers_with_timestamp(indio_dev, data, 85 75 iio_get_time_ns(indio_dev));
+1 -5
drivers/net/dsa/b53/b53_common.c
··· 1603 1603 return 0; 1604 1604 } 1605 1605 1606 - if (bitmap_weight(free_bins, dev->num_arl_bins) == 0) 1607 - return -ENOSPC; 1608 - 1609 1606 *idx = find_first_bit(free_bins, dev->num_arl_bins); 1610 - 1611 - return -ENOENT; 1607 + return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; 1612 1608 } 1613 1609 1614 1610 static int b53_arl_op(struct b53_device *dev, int op, int port,
+1 -5
drivers/net/ethernet/broadcom/bcmsysport.c
··· 2180 2180 if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE) 2181 2181 return -EOPNOTSUPP; 2182 2182 2183 - /* All filters are already in use, we cannot match more rules */ 2184 - if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) == 2185 - RXCHK_BRCM_TAG_MAX) 2186 - return -ENOSPC; 2187 - 2188 2183 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); 2189 2184 if (index >= RXCHK_BRCM_TAG_MAX) 2185 + /* All filters are already in use, we cannot match more rules */ 2190 2186 return -ENOSPC; 2191 2187 2192 2188 /* Location is the classification ID, and index is the position
+2 -2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
··· 355 355 { 356 356 struct otx2_nic *pf = netdev_priv(netdev); 357 357 358 - if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap, 358 + if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap, 359 359 pf->flow_cfg->dmacflt_max_flows)) 360 360 netdev_warn(netdev, 361 361 "Add %pM to CGX/RPM DMAC filters list as well\n", ··· 438 438 return 0; 439 439 440 440 if (flow_cfg->nr_flows == flow_cfg->max_flows || 441 - bitmap_weight(&flow_cfg->dmacflt_bmap, 441 + !bitmap_empty(&flow_cfg->dmacflt_bmap, 442 442 flow_cfg->dmacflt_max_flows)) 443 443 return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; 444 444 else
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1120 1120 struct msg_req *msg; 1121 1121 int err; 1122 1122 1123 - if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, 1123 + if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap, 1124 1124 pf->flow_cfg->dmacflt_max_flows)) 1125 1125 netdev_warn(pf->netdev, 1126 1126 "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+6 -17
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 1994 1994 1995 1995 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) 1996 1996 { 1997 - int port, err; 1997 + int p, port, err; 1998 1998 struct mlx4_vport_state *vp_admin; 1999 1999 struct mlx4_vport_oper_state *vp_oper; 2000 2000 struct mlx4_slave_state *slave_state = 2001 2001 &priv->mfunc.master.slave_state[slave]; 2002 2002 struct mlx4_active_ports actv_ports = mlx4_get_active_ports( 2003 2003 &priv->dev, slave); 2004 - int min_port = find_first_bit(actv_ports.ports, 2005 - priv->dev.caps.num_ports) + 1; 2006 - int max_port = min_port - 1 + 2007 - bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); 2008 2004 2009 - for (port = min_port; port <= max_port; port++) { 2010 - if (!test_bit(port - 1, actv_ports.ports)) 2011 - continue; 2005 + for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) { 2006 + port = p + 1; 2012 2007 priv->mfunc.master.vf_oper[slave].smi_enabled[port] = 2013 2008 priv->mfunc.master.vf_admin[slave].enable_smi[port]; 2014 2009 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; ··· 2058 2063 2059 2064 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave) 2060 2065 { 2061 - int port; 2066 + int p, port; 2062 2067 struct mlx4_vport_oper_state *vp_oper; 2063 2068 struct mlx4_active_ports actv_ports = mlx4_get_active_ports( 2064 2069 &priv->dev, slave); 2065 - int min_port = find_first_bit(actv_ports.ports, 2066 - priv->dev.caps.num_ports) + 1; 2067 - int max_port = min_port - 1 + 2068 - bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); 2069 2070 2070 - 2071 - for (port = min_port; port <= max_port; port++) { 2072 - if (!test_bit(port - 1, actv_ports.ports)) 2073 - continue; 2071 + for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) { 2072 + port = p + 1; 2074 2073 priv->mfunc.master.vf_oper[slave].smi_enabled[port] = 2075 2074 MLX4_VF_SMI_DISABLED; 2076 2075 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+13 -30
drivers/net/ethernet/qlogic/qed/qed_rdma.c
··· 319 319 void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, 320 320 struct qed_bmap *bmap, bool check) 321 321 { 322 - int weight = bitmap_weight(bmap->bitmap, bmap->max_count); 323 - int last_line = bmap->max_count / (64 * 8); 324 - int last_item = last_line * 8 + 325 - DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); 326 - u64 *pmap = (u64 *)bmap->bitmap; 327 - int line, item, offset; 328 - u8 str_last_line[200] = { 0 }; 322 + unsigned int bit, weight, nbits; 323 + unsigned long *b; 329 324 330 - if (!weight || !check) 325 + if (!check) 326 + goto end; 327 + 328 + weight = bitmap_weight(bmap->bitmap, bmap->max_count); 329 + if (!weight) 331 330 goto end; 332 331 333 332 DP_NOTICE(p_hwfn, 334 333 "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", 335 334 bmap->name, bmap->max_count, weight); 336 335 337 - /* print aligned non-zero lines, if any */ 338 - for (item = 0, line = 0; line < last_line; line++, item += 8) 339 - if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) 340 - DP_NOTICE(p_hwfn, 341 - "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", 342 - line, 343 - pmap[item], 344 - pmap[item + 1], 345 - pmap[item + 2], 346 - pmap[item + 3], 347 - pmap[item + 4], 348 - pmap[item + 5], 349 - pmap[item + 6], pmap[item + 7]); 336 + for (bit = 0; bit < bmap->max_count; bit += 512) { 337 + b = bmap->bitmap + BITS_TO_LONGS(bit); 338 + nbits = min(bmap->max_count - bit, 512U); 350 339 351 - /* print last unaligned non-zero line, if any */ 352 - if ((bmap->max_count % (64 * 8)) && 353 - (bitmap_weight((unsigned long *)&pmap[item], 354 - bmap->max_count - item * 64))) { 355 - offset = sprintf(str_last_line, "line 0x%04x: ", line); 356 - for (; item < last_item; item++) 357 - offset += sprintf(str_last_line + offset, 358 - "0x%016llx ", pmap[item]); 359 - DP_NOTICE(p_hwfn, "%s\n", str_last_line); 340 + if (!bitmap_empty(b, nbits)) 341 + DP_NOTICE(p_hwfn, 342 + "line 0x%04x: %*pb\n", bit / 512, nbits, b); 360 
343 } 361 344 362 345 end:
+1 -1
drivers/net/ethernet/qlogic/qed/qed_roce.c
··· 76 76 * We delay for a short while if an async destroy QP is still expected. 77 77 * Beyond the added delay we clear the bitmap anyway. 78 78 */ 79 - while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) { 79 + while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) { 80 80 /* If the HW device is during recovery, all resources are 81 81 * immediately reset without receiving a per-cid indication 82 82 * from HW. In this case we don't expect the cid bitmap to be
+38 -18
include/linux/bitmap.h
··· 72 72 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 73 73 * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst 74 74 * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst 75 + * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst 76 + * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst 75 77 * bitmap_get_value8(map, start) Get 8bit value from map at start 76 78 * bitmap_set_value8(map, value, start) Set 8bit value to map at start 77 79 * ··· 134 132 * lib/bitmap.c provides these functions: 135 133 */ 136 134 137 - int __bitmap_equal(const unsigned long *bitmap1, 138 - const unsigned long *bitmap2, unsigned int nbits); 135 + bool __bitmap_equal(const unsigned long *bitmap1, 136 + const unsigned long *bitmap2, unsigned int nbits); 139 137 bool __pure __bitmap_or_equal(const unsigned long *src1, 140 138 const unsigned long *src2, 141 139 const unsigned long *src3, ··· 159 157 void __bitmap_replace(unsigned long *dst, 160 158 const unsigned long *old, const unsigned long *new, 161 159 const unsigned long *mask, unsigned int nbits); 162 - int __bitmap_intersects(const unsigned long *bitmap1, 163 - const unsigned long *bitmap2, unsigned int nbits); 164 - int __bitmap_subset(const unsigned long *bitmap1, 165 - const unsigned long *bitmap2, unsigned int nbits); 160 + bool __bitmap_intersects(const unsigned long *bitmap1, 161 + const unsigned long *bitmap2, unsigned int nbits); 162 + bool __bitmap_subset(const unsigned long *bitmap1, 163 + const unsigned long *bitmap2, unsigned int nbits); 166 164 int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); 167 165 void __bitmap_set(unsigned long *map, unsigned int start, int len); 168 166 void __bitmap_clear(unsigned long *map, unsigned int start, int len); ··· 266 264 } 267 265 268 266 /* 269 - * On 32-bit systems bitmaps are represented as u32 arrays internally, and 270 - * therefore conversion is not needed when
copying data from/to arrays of u32. 267 + * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64 268 + * machines the order of hi and lo parts of numbers match the bitmap structure. 269 + * In both cases conversion is not needed when copying data from/to arrays of 270 + * u32. But in LE64 case, typecast in bitmap_copy_clear_tail() may lead 271 + * to out-of-bound access. To avoid that, both LE and BE variants of 64-bit 272 + * architectures are not using bitmap_copy_clear_tail(). 271 273 */ 272 274 #if BITS_PER_LONG == 64 273 275 void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, ··· 285 279 #define bitmap_to_arr32(buf, bitmap, nbits) \ 286 280 bitmap_copy_clear_tail((unsigned long *) (buf), \ 287 281 (const unsigned long *) (bitmap), (nbits)) 282 + #endif 283 + 284 + /* 285 + * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32 286 + * machines the order of hi and lo parts of numbers match the bitmap structure. 287 + * In both cases conversion is not needed when copying data from/to arrays of 288 + * u64. 
289 + */ 290 + #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) 291 + void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits); 292 + void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits); 293 + #else 294 + #define bitmap_from_arr64(bitmap, buf, nbits) \ 295 + bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits)) 296 + #define bitmap_to_arr64(buf, bitmap, nbits) \ 297 + bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits)) 288 298 #endif 289 299 290 300 static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, ··· 353 331 #endif 354 332 #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) 355 333 356 - static inline int bitmap_equal(const unsigned long *src1, 357 - const unsigned long *src2, unsigned int nbits) 334 + static inline bool bitmap_equal(const unsigned long *src1, 335 + const unsigned long *src2, unsigned int nbits) 358 336 { 359 337 if (small_const_nbits(nbits)) 360 338 return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); ··· 384 362 return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits)); 385 363 } 386 364 387 - static inline int bitmap_intersects(const unsigned long *src1, 388 - const unsigned long *src2, unsigned int nbits) 365 + static inline bool bitmap_intersects(const unsigned long *src1, 366 + const unsigned long *src2, 367 + unsigned int nbits) 389 368 { 390 369 if (small_const_nbits(nbits)) 391 370 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; ··· 394 371 return __bitmap_intersects(src1, src2, nbits); 395 372 } 396 373 397 - static inline int bitmap_subset(const unsigned long *src1, 398 - const unsigned long *src2, unsigned int nbits) 374 + static inline bool bitmap_subset(const unsigned long *src1, 375 + const unsigned long *src2, unsigned int nbits) 399 376 { 400 377 if (small_const_nbits(nbits)) 401 378 return ! 
((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); ··· 537 514 */ 538 515 static inline void bitmap_from_u64(unsigned long *dst, u64 mask) 539 516 { 540 - dst[0] = mask & ULONG_MAX; 541 - 542 - if (sizeof(mask) > sizeof(unsigned long)) 543 - dst[1] = mask >> 32; 517 + bitmap_from_arr64(dst, &mask, 64); 544 518 } 545 519 546 520 /**
+3 -3
include/linux/find.h
··· 21 21 /** 22 22 * find_next_bit - find the next set bit in a memory region 23 23 * @addr: The address to base the search on 24 - * @offset: The bitnumber to start searching at 25 24 * @size: The bitmap size in bits 25 + * @offset: The bitnumber to start searching at 26 26 * 27 27 * Returns the bit number for the next set bit 28 28 * If no bits are set, returns @size. ··· 50 50 * find_next_and_bit - find the next set bit in both memory regions 51 51 * @addr1: The first address to base the search on 52 52 * @addr2: The second address to base the search on 53 - * @offset: The bitnumber to start searching at 54 53 * @size: The bitmap size in bits 54 + * @offset: The bitnumber to start searching at 55 55 * 56 56 * Returns the bit number for the next set bit 57 57 * If no bits are set, returns @size. ··· 79 79 /** 80 80 * find_next_zero_bit - find the next cleared bit in a memory region 81 81 * @addr: The address to base the search on 82 - * @offset: The bitnumber to start searching at 83 82 * @size: The bitmap size in bits 83 + * @offset: The bitnumber to start searching at 84 84 * 85 85 * Returns the bit number of the next zero bit 86 86 * If no bits are zero, returns @size.
+19 -19
include/linux/nodemask.h
··· 42 42 * void nodes_shift_right(dst, src, n) Shift right 43 43 * void nodes_shift_left(dst, src, n) Shift left 44 44 * 45 - * int first_node(mask) Number lowest set bit, or MAX_NUMNODES 46 - * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES 47 - * int next_node_in(node, mask) Next node past 'node', or wrap to first, 45 + * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES 46 + * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES 47 + * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first, 48 48 * or MAX_NUMNODES 49 - * int first_unset_node(mask) First node not set in mask, or 49 + * unsigned int first_unset_node(mask) First node not set in mask, or 50 50 * MAX_NUMNODES 51 51 * 52 52 * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set ··· 153 153 154 154 #define node_test_and_set(node, nodemask) \ 155 155 __node_test_and_set((node), &(nodemask)) 156 - static inline int __node_test_and_set(int node, nodemask_t *addr) 156 + static inline bool __node_test_and_set(int node, nodemask_t *addr) 157 157 { 158 158 return test_and_set_bit(node, addr->bits); 159 159 } ··· 200 200 201 201 #define nodes_equal(src1, src2) \ 202 202 __nodes_equal(&(src1), &(src2), MAX_NUMNODES) 203 - static inline int __nodes_equal(const nodemask_t *src1p, 203 + static inline bool __nodes_equal(const nodemask_t *src1p, 204 204 const nodemask_t *src2p, unsigned int nbits) 205 205 { 206 206 return bitmap_equal(src1p->bits, src2p->bits, nbits); ··· 208 208 209 209 #define nodes_intersects(src1, src2) \ 210 210 __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) 211 - static inline int __nodes_intersects(const nodemask_t *src1p, 211 + static inline bool __nodes_intersects(const nodemask_t *src1p, 212 212 const nodemask_t *src2p, unsigned int nbits) 213 213 { 214 214 return bitmap_intersects(src1p->bits, src2p->bits, nbits); ··· 216 216 217 217 #define nodes_subset(src1, src2) \ 218 218
__nodes_subset(&(src1), &(src2), MAX_NUMNODES) 219 - static inline int __nodes_subset(const nodemask_t *src1p, 219 + static inline bool __nodes_subset(const nodemask_t *src1p, 220 220 const nodemask_t *src2p, unsigned int nbits) 221 221 { 222 222 return bitmap_subset(src1p->bits, src2p->bits, nbits); 223 223 } 224 224 225 225 #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES) 226 - static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits) 226 + static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits) 227 227 { 228 228 return bitmap_empty(srcp->bits, nbits); 229 229 } 230 230 231 231 #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) 232 - static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits) 232 + static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits) 233 233 { 234 234 return bitmap_full(srcp->bits, nbits); 235 235 } ··· 260 260 > MAX_NUMNODES, then the silly min_ts could be dropped. */ 261 261 262 262 #define first_node(src) __first_node(&(src)) 263 - static inline int __first_node(const nodemask_t *srcp) 263 + static inline unsigned int __first_node(const nodemask_t *srcp) 264 264 { 265 - return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); 265 + return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); 266 266 } 267 267 268 268 #define next_node(n, src) __next_node((n), &(src)) 269 - static inline int __next_node(int n, const nodemask_t *srcp) 269 + static inline unsigned int __next_node(int n, const nodemask_t *srcp) 270 270 { 271 - return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); 271 + return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); 272 272 } 273 273 274 274 /* ··· 276 276 * the first node in src if needed. Returns MAX_NUMNODES if src is empty. 
277 277 */ 278 278 #define next_node_in(n, src) __next_node_in((n), &(src)) 279 - int __next_node_in(int node, const nodemask_t *srcp); 279 + unsigned int __next_node_in(int node, const nodemask_t *srcp); 280 280 281 281 static inline void init_nodemask_of_node(nodemask_t *mask, int node) 282 282 { ··· 296 296 }) 297 297 298 298 #define first_unset_node(mask) __first_unset_node(&(mask)) 299 - static inline int __first_unset_node(const nodemask_t *maskp) 299 + static inline unsigned int __first_unset_node(const nodemask_t *maskp) 300 300 { 301 - return min_t(int,MAX_NUMNODES, 301 + return min_t(unsigned int, MAX_NUMNODES, 302 302 find_first_zero_bit(maskp->bits, MAX_NUMNODES)); 303 303 } 304 304 ··· 435 435 436 436 #define first_online_node first_node(node_states[N_ONLINE]) 437 437 #define first_memory_node first_node(node_states[N_MEMORY]) 438 - static inline int next_online_node(int nid) 438 + static inline unsigned int next_online_node(int nid) 439 439 { 440 440 return next_node(nid, node_states[N_ONLINE]); 441 441 } 442 - static inline int next_memory_node(int nid) 442 + static inline unsigned int next_memory_node(int nid) 443 443 { 444 444 return next_node(nid, node_states[N_MEMORY]); 445 445 }
+89 -28
lib/bitmap.c
··· 45 45 * for the best explanations of this ordering. 46 46 */ 47 47 48 - int __bitmap_equal(const unsigned long *bitmap1, 49 - const unsigned long *bitmap2, unsigned int bits) 48 + bool __bitmap_equal(const unsigned long *bitmap1, 49 + const unsigned long *bitmap2, unsigned int bits) 50 50 { 51 51 unsigned int k, lim = bits/BITS_PER_LONG; 52 52 for (k = 0; k < lim; ++k) 53 53 if (bitmap1[k] != bitmap2[k]) 54 - return 0; 54 + return false; 55 55 56 56 if (bits % BITS_PER_LONG) 57 57 if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 58 - return 0; 58 + return false; 59 59 60 - return 1; 60 + return true; 61 61 } 62 62 EXPORT_SYMBOL(__bitmap_equal); 63 63 ··· 303 303 } 304 304 EXPORT_SYMBOL(__bitmap_replace); 305 305 306 - int __bitmap_intersects(const unsigned long *bitmap1, 307 - const unsigned long *bitmap2, unsigned int bits) 306 + bool __bitmap_intersects(const unsigned long *bitmap1, 307 + const unsigned long *bitmap2, unsigned int bits) 308 308 { 309 309 unsigned int k, lim = bits/BITS_PER_LONG; 310 310 for (k = 0; k < lim; ++k) 311 311 if (bitmap1[k] & bitmap2[k]) 312 - return 1; 312 + return true; 313 313 314 314 if (bits % BITS_PER_LONG) 315 315 if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 316 - return 1; 317 - return 0; 316 + return true; 317 + return false; 318 318 } 319 319 EXPORT_SYMBOL(__bitmap_intersects); 320 320 321 - int __bitmap_subset(const unsigned long *bitmap1, 322 - const unsigned long *bitmap2, unsigned int bits) 321 + bool __bitmap_subset(const unsigned long *bitmap1, 322 + const unsigned long *bitmap2, unsigned int bits) 323 323 { 324 324 unsigned int k, lim = bits/BITS_PER_LONG; 325 325 for (k = 0; k < lim; ++k) 326 326 if (bitmap1[k] & ~bitmap2[k]) 327 - return 0; 327 + return false; 328 328 329 329 if (bits % BITS_PER_LONG) 330 330 if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 331 - return 0; 332 - return 1; 331 + return false; 332 + return true; 333 333 } 334 334 
EXPORT_SYMBOL(__bitmap_subset); 335 335 ··· 527 527 * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal 528 528 * bitmask and decimal list to userspace by sysfs ABI. 529 529 * Drivers might be using a normal attribute for this kind of ABIs. A 530 - * normal attribute typically has show entry as below: 531 - * static ssize_t example_attribute_show(struct device *dev, 530 + * normal attribute typically has show entry as below:: 531 + * 532 + * static ssize_t example_attribute_show(struct device *dev, 532 533 * struct device_attribute *attr, char *buf) 533 - * { 534 + * { 534 535 * ... 535 536 * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max); 536 - * } 537 + * } 538 + * 537 539 * show entry of attribute has no offset and count parameters and this 538 540 * means the file is limited to one page only. 539 541 * bitmap_print_to_pagebuf() API works terribly well for this kind of 540 - * normal attribute with buf parameter and without offset, count: 541 - * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, 542 + * normal attribute with buf parameter and without offset, count:: 543 + * 544 + * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, 542 545 * int nmaskbits) 543 - * { 544 - * } 546 + * { 547 + * } 548 + * 545 549 * The problem is once we have a large bitmap, we have a chance to get a 546 550 * bitmask or list more than one page. Especially for list, it could be 547 551 * as complex as 0,3,5,7,9,... We have no simple way to know it exact size. 548 552 * It turns out bin_attribute is a way to break this limit. 
bin_attribute 549 - * has show entry as below: 550 - * static ssize_t 551 - * example_bin_attribute_show(struct file *filp, struct kobject *kobj, 553 + * has show entry as below:: 554 + * 555 + * static ssize_t 556 + * example_bin_attribute_show(struct file *filp, struct kobject *kobj, 552 557 * struct bin_attribute *attr, char *buf, 553 558 * loff_t offset, size_t count) 554 - * { 559 + * { 555 560 * ... 556 - * } 561 + * } 562 + * 557 563 * With the new offset and count parameters, this makes sysfs ABI be able 558 564 * to support file size more than one page. For example, offset could be 559 565 * >= 4096. ··· 583 577 * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf(). 584 578 * It is intended to workaround sysfs limitations discussed above and should be 585 579 * used carefully in general case for the following reasons: 580 + * 586 581 * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf(). 587 582 * - Memory complexity is O(nbits), comparing to O(1) for snprintf(). 588 583 * - @off and @count are NOT offset and number of bits to print. ··· 1512 1505 buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); 1513 1506 } 1514 1507 EXPORT_SYMBOL(bitmap_to_arr32); 1508 + #endif 1515 1509 1510 + #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) 1511 + /** 1512 + * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap 1513 + * @bitmap: array of unsigned longs, the destination bitmap 1514 + * @buf: array of u64 (in host byte order), the source bitmap 1515 + * @nbits: number of bits in @bitmap 1516 + */ 1517 + void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits) 1518 + { 1519 + int n; 1520 + 1521 + for (n = nbits; n > 0; n -= 64) { 1522 + u64 val = *buf++; 1523 + 1524 + *bitmap++ = val; 1525 + if (n > 32) 1526 + *bitmap++ = val >> 32; 1527 + } 1528 + 1529 + /* 1530 + * Clear tail bits in the last word beyond nbits. 
1531 + * 1532 + * Negative index is OK because here we point to the word next 1533 + * to the last word of the bitmap, except for nbits == 0, which 1534 + * is tested implicitly. 1535 + */ 1536 + if (nbits % BITS_PER_LONG) 1537 + bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits); 1538 + } 1539 + EXPORT_SYMBOL(bitmap_from_arr64); 1540 + 1541 + /** 1542 + * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits 1543 + * @buf: array of u64 (in host byte order), the dest bitmap 1544 + * @bitmap: array of unsigned longs, the source bitmap 1545 + * @nbits: number of bits in @bitmap 1546 + */ 1547 + void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits) 1548 + { 1549 + const unsigned long *end = bitmap + BITS_TO_LONGS(nbits); 1550 + 1551 + while (bitmap < end) { 1552 + *buf = *bitmap++; 1553 + if (bitmap < end) 1554 + *buf |= (u64)(*bitmap++) << 32; 1555 + buf++; 1556 + } 1557 + 1558 + /* Clear tail bits in the last element of array beyond nbits. */ 1559 + if (nbits % 64) 1560 + buf[-1] &= GENMASK_ULL(nbits % 64, 0); 1561 + } 1562 + EXPORT_SYMBOL(bitmap_to_arr64); 1516 1563 #endif
+2 -2
lib/nodemask.c
··· 3 3 #include <linux/module.h> 4 4 #include <linux/random.h> 5 5 6 - int __next_node_in(int node, const nodemask_t *srcp) 6 + unsigned int __next_node_in(int node, const nodemask_t *srcp) 7 7 { 8 - int ret = __next_node(node, srcp); 8 + unsigned int ret = __next_node(node, srcp); 9 9 10 10 if (ret == MAX_NUMNODES) 11 11 ret = __first_node(srcp);
+25
lib/test_bitmap.c
··· 585 585 } 586 586 } 587 587 588 + static void __init test_bitmap_arr64(void) 589 + { 590 + unsigned int nbits, next_bit; 591 + u64 arr[EXP1_IN_BITS / 64]; 592 + DECLARE_BITMAP(bmap2, EXP1_IN_BITS); 593 + 594 + memset(arr, 0xa5, sizeof(arr)); 595 + 596 + for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { 597 + memset(bmap2, 0xff, sizeof(arr)); 598 + bitmap_to_arr64(arr, exp1, nbits); 599 + bitmap_from_arr64(bmap2, arr, nbits); 600 + expect_eq_bitmap(bmap2, exp1, nbits); 601 + 602 + next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); 603 + if (next_bit < round_up(nbits, BITS_PER_LONG)) 604 + pr_err("bitmap_copy_arr64(nbits == %d:" 605 + " tail is not safely cleared: %d\n", nbits, next_bit); 606 + 607 + if (nbits < EXP1_IN_BITS - 64) 608 + expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5); 609 + } 610 + } 611 + 588 612 static void noinline __init test_mem_optimisations(void) 589 613 { 590 614 DECLARE_BITMAP(bmap1, 1024); ··· 876 852 test_copy(); 877 853 test_replace(); 878 854 test_bitmap_arr32(); 855 + test_bitmap_arr64(); 879 856 test_bitmap_parse(); 880 857 test_bitmap_parselist(); 881 858 test_bitmap_printlist();
+2 -2
mm/vmstat.c
··· 2049 2049 int node; 2050 2050 2051 2051 for_each_online_node(node) { 2052 - if (cpumask_weight(cpumask_of_node(node)) > 0) 2052 + if (!cpumask_empty(cpumask_of_node(node))) 2053 2053 node_set_state(node, N_CPU); 2054 2054 } 2055 2055 } ··· 2081 2081 2082 2082 refresh_zone_stat_thresholds(); 2083 2083 node_cpus = cpumask_of_node(node); 2084 - if (cpumask_weight(node_cpus) > 0) 2084 + if (!cpumask_empty(node_cpus)) 2085 2085 return 0; 2086 2086 2087 2087 node_clear_state(node, N_CPU);
+9 -8
tools/include/linux/bitmap.h
··· 16 16 const unsigned long *bitmap2, int bits); 17 17 int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, 18 18 const unsigned long *bitmap2, unsigned int bits); 19 - int __bitmap_equal(const unsigned long *bitmap1, 20 - const unsigned long *bitmap2, unsigned int bits); 19 + bool __bitmap_equal(const unsigned long *bitmap1, 20 + const unsigned long *bitmap2, unsigned int bits); 21 21 void bitmap_clear(unsigned long *map, unsigned int start, int len); 22 - int __bitmap_intersects(const unsigned long *bitmap1, 23 - const unsigned long *bitmap2, unsigned int bits); 22 + bool __bitmap_intersects(const unsigned long *bitmap1, 23 + const unsigned long *bitmap2, unsigned int bits); 24 24 25 25 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) 26 26 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) ··· 162 162 #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) 163 163 #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) 164 164 165 - static inline int bitmap_equal(const unsigned long *src1, 166 - const unsigned long *src2, unsigned int nbits) 165 + static inline bool bitmap_equal(const unsigned long *src1, 166 + const unsigned long *src2, unsigned int nbits) 167 167 { 168 168 if (small_const_nbits(nbits)) 169 169 return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); ··· 173 173 return __bitmap_equal(src1, src2, nbits); 174 174 } 175 175 176 - static inline int bitmap_intersects(const unsigned long *src1, 177 - const unsigned long *src2, unsigned int nbits) 176 + static inline bool bitmap_intersects(const unsigned long *src1, 177 + const unsigned long *src2, 178 + unsigned int nbits) 178 179 { 179 180 if (small_const_nbits(nbits)) 180 181 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+10 -10
tools/lib/bitmap.c
··· 72 72 return result != 0; 73 73 } 74 74 75 - int __bitmap_equal(const unsigned long *bitmap1, 76 - const unsigned long *bitmap2, unsigned int bits) 75 + bool __bitmap_equal(const unsigned long *bitmap1, 76 + const unsigned long *bitmap2, unsigned int bits) 77 77 { 78 78 unsigned int k, lim = bits/BITS_PER_LONG; 79 79 for (k = 0; k < lim; ++k) 80 80 if (bitmap1[k] != bitmap2[k]) 81 - return 0; 81 + return false; 82 82 83 83 if (bits % BITS_PER_LONG) 84 84 if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 85 - return 0; 85 + return false; 86 86 87 - return 1; 87 + return true; 88 88 } 89 89 90 - int __bitmap_intersects(const unsigned long *bitmap1, 91 - const unsigned long *bitmap2, unsigned int bits) 90 + bool __bitmap_intersects(const unsigned long *bitmap1, 91 + const unsigned long *bitmap2, unsigned int bits) 92 92 { 93 93 unsigned int k, lim = bits/BITS_PER_LONG; 94 94 for (k = 0; k < lim; ++k) 95 95 if (bitmap1[k] & bitmap2[k]) 96 - return 1; 96 + return true; 97 97 98 98 if (bits % BITS_PER_LONG) 99 99 if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 100 - return 1; 101 - return 0; 100 + return true; 101 + return false; 102 102 }