Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'bitmap-for-6.15' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

- cpumask_next_wrap() rework (me)

- GENMASK() simplification (I Hsin)

- rust bindings for cpumasks (Viresh and me)

- scattered cleanups (Andy, Tamir, Vincent, Ignacio and Joel)

* tag 'bitmap-for-6.15' of https://github.com/norov/linux: (22 commits)
cpumask: align text in comment
riscv: fix test_and_{set,clear}_bit ordering documentation
treewide: fix typo 'unsigned __init128' -> 'unsigned __int128'
MAINTAINERS: add rust bindings entry for bitmap API
rust: Add cpumask helpers
uapi: Revert "bitops: avoid integer overflow in GENMASK(_ULL)"
cpumask: drop cpumask_next_wrap_old()
PCI: hv: Switch hv_compose_multi_msi_req_get_cpu() to using cpumask_next_wrap()
scsi: lpfc: rework lpfc_next_{online,present}_cpu()
scsi: lpfc: switch lpfc_irq_rebalance() to using cpumask_next_wrap()
s390: switch stop_machine_yield() to using cpumask_next_wrap()
padata: switch padata_find_next() to using cpumask_next_wrap()
cpumask: use cpumask_next_wrap() where appropriate
cpumask: re-introduce cpumask_next{,_and}_wrap()
cpumask: deprecate cpumask_next_wrap()
powerpc/xmon: simplify xmon_batch_next_cpu()
ibmvnic: simplify ibmvnic_set_queue_affinity()
virtio_net: simplify virtnet_set_affinity()
objpool: rework objpool_pop()
cpumask: add for_each_{possible,online}_cpu_wrap
...

+147 -144
+5
MAINTAINERS
··· 4026 4026 F: tools/lib/bitmap.c 4027 4027 F: tools/lib/find_bit.c 4028 4028 4029 + BITMAP API BINDINGS [RUST] 4030 + M: Yury Norov <yury.norov@gmail.com> 4031 + S: Maintained 4032 + F: rust/helpers/cpumask.c 4033 + 4029 4034 BITOPS API 4030 4035 M: Yury Norov <yury.norov@gmail.com> 4031 4036 R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+1 -5
arch/powerpc/xmon/xmon.c
··· 1271 1271 { 1272 1272 unsigned long cpu; 1273 1273 1274 - while (!cpumask_empty(&xmon_batch_cpus)) { 1275 - cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus, 1276 - xmon_batch_start_cpu, true); 1277 - if (cpu >= nr_cpu_ids) 1278 - break; 1274 + for_each_cpu_wrap(cpu, &xmon_batch_cpus, xmon_batch_start_cpu) { 1279 1275 if (xmon_batch_start_cpu == -1) 1280 1276 xmon_batch_start_cpu = cpu; 1281 1277 if (xmon_switch_cpu(cpu))
+2 -2
arch/riscv/include/asm/bitops.h
··· 226 226 * @nr: Bit to set 227 227 * @addr: Address to count from 228 228 * 229 - * This operation may be reordered on other architectures than x86. 229 + * This is an atomic fully-ordered operation (implied full memory barrier). 230 230 */ 231 231 static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr) 232 232 { ··· 238 238 * @nr: Bit to clear 239 239 * @addr: Address to count from 240 240 * 241 - * This operation can be reordered on other architectures other than x86. 241 + * This is an atomic fully-ordered operation (implied full memory barrier). 242 242 */ 243 243 static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr) 244 244 {
+1 -1
arch/s390/kernel/processor.c
··· 72 72 this_cpu = smp_processor_id(); 73 73 if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) { 74 74 __this_cpu_write(cpu_relax_retry, 0); 75 - cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false); 75 + cpu = cpumask_next_wrap(this_cpu, cpumask); 76 76 if (cpu >= nr_cpu_ids) 77 77 return; 78 78 if (arch_vcpu_is_preempted(cpu))
+11 -7
drivers/net/ethernet/ibm/ibmvnic.c
··· 234 234 (*stragglers)--; 235 235 } 236 236 /* atomic write is safer than writing bit by bit directly */ 237 - for (i = 0; i < stride; i++) { 238 - cpumask_set_cpu(*cpu, mask); 239 - *cpu = cpumask_next_wrap(*cpu, cpu_online_mask, 240 - nr_cpu_ids, false); 237 + for_each_online_cpu_wrap(i, *cpu) { 238 + if (!stride--) { 239 + /* For the next queue we start from the first 240 + * unused CPU in this queue 241 + */ 242 + *cpu = i; 243 + break; 244 + } 245 + cpumask_set_cpu(i, mask); 241 246 } 247 + 242 248 /* set queue affinity mask */ 243 249 cpumask_copy(queue->affinity_mask, mask); 244 250 rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask); ··· 262 256 int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0; 263 257 int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0; 264 258 int total_queues, stride, stragglers, i; 265 - unsigned int num_cpu, cpu; 259 + unsigned int num_cpu, cpu = 0; 266 260 bool is_rx_queue; 267 261 int rc = 0; 268 262 ··· 280 274 stride = max_t(int, num_cpu / total_queues, 1); 281 275 /* number of leftover cpu's */ 282 276 stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0; 283 - /* next available cpu to assign irq to */ 284 - cpu = cpumask_next(-1, cpu_online_mask); 285 277 286 278 for (i = 0; i < total_queues; i++) { 287 279 is_rx_queue = false;
+7 -5
drivers/net/virtio_net.c
··· 3826 3826 cpumask_var_t mask; 3827 3827 int stragglers; 3828 3828 int group_size; 3829 - int i, j, cpu; 3829 + int i, start = 0, cpu; 3830 3830 int num_cpu; 3831 3831 int stride; 3832 3832 ··· 3840 3840 stragglers = num_cpu >= vi->curr_queue_pairs ? 3841 3841 num_cpu % vi->curr_queue_pairs : 3842 3842 0; 3843 - cpu = cpumask_first(cpu_online_mask); 3844 3843 3845 3844 for (i = 0; i < vi->curr_queue_pairs; i++) { 3846 3845 group_size = stride + (i < stragglers ? 1 : 0); 3847 3846 3848 - for (j = 0; j < group_size; j++) { 3847 + for_each_online_cpu_wrap(cpu, start) { 3848 + if (!group_size--) { 3849 + start = cpu; 3850 + break; 3851 + } 3849 3852 cpumask_set_cpu(cpu, mask); 3850 - cpu = cpumask_next_wrap(cpu, cpu_online_mask, 3851 - nr_cpu_ids, false); 3852 3853 } 3854 + 3853 3855 virtqueue_set_affinity(vi->rq[i].vq, mask); 3854 3856 virtqueue_set_affinity(vi->sq[i].vq, mask); 3855 3857 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
+1 -2
drivers/pci/controller/pci-hyperv.c
··· 1757 1757 1758 1758 spin_lock_irqsave(&multi_msi_cpu_lock, flags); 1759 1759 1760 - cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids, 1761 - false); 1760 + cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask); 1762 1761 cpu = cpu_next; 1763 1762 1764 1763 spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
+5 -18
drivers/scsi/lpfc/lpfc.h
··· 1715 1715 * Note: If no valid cpu found, then nr_cpu_ids is returned. 1716 1716 * 1717 1717 **/ 1718 - static inline unsigned int 1718 + static __always_inline unsigned int 1719 1719 lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start) 1720 1720 { 1721 - unsigned int cpu_it; 1722 - 1723 - for_each_cpu_wrap(cpu_it, mask, start) { 1724 - if (cpu_online(cpu_it)) 1725 - break; 1726 - } 1727 - 1728 - return cpu_it; 1721 + return cpumask_next_and_wrap(start, mask, cpu_online_mask); 1729 1722 } 1723 + 1730 1724 /** 1731 1725 * lpfc_next_present_cpu - Finds next present CPU after n 1732 1726 * @n: the cpu prior to search ··· 1728 1734 * Note: If no next present cpu, then fallback to first present cpu. 1729 1735 * 1730 1736 **/ 1731 - static inline unsigned int lpfc_next_present_cpu(int n) 1737 + static __always_inline unsigned int lpfc_next_present_cpu(int n) 1732 1738 { 1733 - unsigned int cpu; 1734 - 1735 - cpu = cpumask_next(n, cpu_present_mask); 1736 - 1737 - if (cpu >= nr_cpu_ids) 1738 - cpu = cpumask_first(cpu_present_mask); 1739 - 1740 - return cpu; 1739 + return cpumask_next_wrap(n, cpu_present_mask); 1741 1740 } 1742 1741 1743 1742 /**
+1 -1
drivers/scsi/lpfc/lpfc_init.c
··· 12873 12873 12874 12874 if (offline) { 12875 12875 /* Find next online CPU on original mask */ 12876 - cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12876 + cpu_next = cpumask_next_wrap(cpu, orig_mask); 12877 12877 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12878 12878 12879 12879 /* Found a valid CPU */
+5 -3
include/linux/bitmap.h
··· 560 560 * ...0..11...0..10 561 561 * dst: 0000001100000010 562 562 * 563 - * A relationship exists between bitmap_scatter() and bitmap_gather(). 563 + * A relationship exists between bitmap_scatter() and bitmap_gather(). See 564 + * bitmap_gather() for the bitmap gather detailed operations. TL;DR: 564 565 * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation. 565 - * See bitmap_scatter() for details related to this relationship. 566 566 */ 567 567 static __always_inline 568 568 void bitmap_scatter(unsigned long *dst, const unsigned long *src, ··· 608 608 * dst: 0000000000011010 609 609 * 610 610 * A relationship exists between bitmap_gather() and bitmap_scatter(). See 611 - * bitmap_scatter() for the bitmap scatter detailed operations. 611 + * bitmap_scatter() for the bitmap scatter detailed operations. TL;DR: 612 + * bitmap_scatter() can be seen as the 'reverse' bitmap_gather() operation. 613 + * 612 614 * Suppose scattered computed using bitmap_scatter(scattered, src, mask, n). 613 615 * The operation bitmap_gather(result, scattered, mask, n) leads to a result 614 616 * equal or equivalent to src.
+1 -1
include/linux/bits.h
··· 40 40 * Missing asm support 41 41 * 42 42 * __GENMASK_U128() depends on _BIT128() which would not work 43 - * in the asm code, as it shifts an 'unsigned __init128' data 43 + * in the asm code, as it shifts an 'unsigned __int128' data 44 44 * type instead of direct representation of 128 bit constants 45 45 * such as long and unsigned long. The fundamental problem is 46 46 * that a 128 bit constant will get silently truncated by the
+49 -22
include/linux/cpumask.h
··· 81 81 * 82 82 * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable 83 83 * cpu_present_mask - has bit 'cpu' set iff cpu is populated 84 - * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online 84 + * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online 85 85 * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler 86 86 * cpu_active_mask - has bit 'cpu' set iff cpu available to migration 87 87 * ··· 285 285 } 286 286 287 287 /** 288 + * cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from 289 + * @n+1. If nothing found, wrap around and start from 290 + * the beginning 291 + * @n: the cpu prior to the place to search (i.e. search starts from @n+1) 292 + * @src1p: the first cpumask pointer 293 + * @src2p: the second cpumask pointer 294 + * 295 + * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src1p & @src2p is empty. 296 + */ 297 + static __always_inline 298 + unsigned int cpumask_next_and_wrap(int n, const struct cpumask *src1p, 299 + const struct cpumask *src2p) 300 + { 301 + /* -1 is a legal arg here. */ 302 + if (n != -1) 303 + cpumask_check(n); 304 + return find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p), 305 + small_cpumask_bits, n + 1); 306 + } 307 + 308 + /** 309 + * cpumask_next_wrap - get the next cpu in *src, starting from @n+1. If nothing 310 + * found, wrap around and start from the beginning 311 + * @n: the cpu prior to the place to search (i.e. search starts from @n+1) 312 + * @src: cpumask pointer 313 + * 314 + * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src is empty. 315 + */ 316 + static __always_inline 317 + unsigned int cpumask_next_wrap(int n, const struct cpumask *src) 318 + { 319 + /* -1 is a legal arg here. */ 320 + if (n != -1) 321 + cpumask_check(n); 322 + return find_next_bit_wrap(cpumask_bits(src), small_cpumask_bits, n + 1); 323 + } 324 + 325 + /** 288 326 * for_each_cpu - iterate over every cpu in a mask 289 327 * @cpu: the (optionally unsigned) integer iterator 290 328 * @mask: the cpumask pointer ··· 331 293 */ 332 294 #define for_each_cpu(cpu, mask) \ 333 295 for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits) 334 - 335 - #if NR_CPUS == 1 336 - static __always_inline 337 - unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) 338 - { 339 - cpumask_check(start); 340 - if (n != -1) 341 - cpumask_check(n); 342 - 343 - /* 344 - * Return the first available CPU when wrapping, or when starting before cpu0, 345 - * since there is only one valid option. 346 - */ 347 - if (wrap && n >= 0) 348 - return nr_cpumask_bits; 349 - 350 - return cpumask_first(mask); 351 - } 352 - #else 353 - unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); 354 - #endif 355 296 356 297 /** 357 298 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location ··· 1050 1033 #define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) 1051 1034 #define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) 1052 1035 #define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) 1036 + 1037 + #define for_each_possible_cpu_wrap(cpu, start) \ 1038 + for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++) 1039 + #define for_each_online_cpu_wrap(cpu, start) \ 1040 + for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++) 1053 1041 #else 1054 1042 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) 1055 1043 #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) 1056 1044 #define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask) 1057 1045 #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) 1046 + 1047 + #define for_each_possible_cpu_wrap(cpu, start) \ 1048 + for_each_cpu_wrap((cpu), cpu_possible_mask, (start)) 1049 + #define for_each_online_cpu_wrap(cpu, start) \ 1050 + for_each_cpu_wrap((cpu), cpu_online_mask, (start)) 1058 1051 #endif 1059 1052 1060 1053 /* Wrappers for arch boot code to manipulate normally-constant masks */
+3 -4
include/linux/objpool.h
··· 170 170 { 171 171 void *obj = NULL; 172 172 unsigned long flags; 173 - int i, cpu; 173 + int start, cpu; 174 174 175 175 /* disable local irq to avoid preemption & interruption */ 176 176 raw_local_irq_save(flags); 177 177 178 - cpu = raw_smp_processor_id(); 179 - for (i = 0; i < pool->nr_possible_cpus; i++) { 178 + start = raw_smp_processor_id(); 179 + for_each_possible_cpu_wrap(cpu, start) { 180 180 obj = __objpool_try_get_slot(pool, cpu); 181 181 if (obj) 182 182 break; 183 - cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1); 184 183 } 185 184 raw_local_irq_restore(flags); 186 185
+2 -6
include/uapi/linux/bits.h
··· 4 4 #ifndef _UAPI_LINUX_BITS_H 5 5 #define _UAPI_LINUX_BITS_H 6 6 7 - #define __GENMASK(h, l) \ 8 - (((~_UL(0)) - (_UL(1) << (l)) + 1) & \ 9 - (~_UL(0) >> (__BITS_PER_LONG - 1 - (h)))) 7 + #define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (BITS_PER_LONG - 1 - (h)))) 10 8 11 - #define __GENMASK_ULL(h, l) \ 12 - (((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \ 13 - (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h)))) 9 + #define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) 14 10 15 11 #define __GENMASK_U128(h, l) \ 16 12 ((_BIT128((h)) << 1) - (_BIT128(l)))
+1 -1
include/uapi/linux/const.h
··· 33 33 * Missing asm support 34 34 * 35 35 * __BIT128() would not work in the asm code, as it shifts an 36 - * 'unsigned __init128' data type as direct representation of 36 + * 'unsigned __int128' data type as direct representation of 37 37 * 128 bit constants is not supported in the gcc compiler, as 38 38 * they get silently truncated. 39 39 *
+1 -1
kernel/padata.c
··· 290 290 if (remove_object) { 291 291 list_del_init(&padata->list); 292 292 ++pd->processed; 293 - pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); 293 + pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu); 294 294 } 295 295 296 296 spin_unlock(&reorder->lock);
+2 -35
lib/cpumask.c
··· 7 7 #include <linux/memblock.h> 8 8 #include <linux/numa.h> 9 9 10 - /** 11 - * cpumask_next_wrap - helper to implement for_each_cpu_wrap 12 - * @n: the cpu prior to the place to search 13 - * @mask: the cpumask pointer 14 - * @start: the start point of the iteration 15 - * @wrap: assume @n crossing @start terminates the iteration 16 - * 17 - * Return: >= nr_cpu_ids on completion 18 - * 19 - * Note: the @wrap argument is required for the start condition when 20 - * we cannot assume @start is set in @mask. 21 - */ 22 - unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) 23 - { 24 - unsigned int next; 25 - 26 - again: 27 - next = cpumask_next(n, mask); 28 - 29 - if (wrap && n < start && next >= start) { 30 - return nr_cpumask_bits; 31 - 32 - } else if (next >= nr_cpumask_bits) { 33 - wrap = true; 34 - n = -1; 35 - goto again; 36 - } 37 - 38 - return next; 39 - } 40 - EXPORT_SYMBOL(cpumask_next_wrap); 41 - 42 10 /* These are not inline because of header tangles. */ 43 11 #ifdef CONFIG_CPUMASK_OFFSTACK 44 12 /** ··· 139 171 /* NOTE: our first selection will skip 0. */ 140 172 prev = __this_cpu_read(distribute_cpu_mask_prev); 141 173 142 - next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p), 143 - nr_cpumask_bits, prev + 1); 174 + next = cpumask_next_and_wrap(prev, src1p, src2p); 144 175 if (next < nr_cpu_ids) 145 176 __this_cpu_write(distribute_cpu_mask_prev, next); 146 177 ··· 159 192 160 193 /* NOTE: our first selection will skip 0. */ 161 194 prev = __this_cpu_read(distribute_cpu_mask_prev); 162 - next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1); 195 + next = cpumask_next_wrap(prev, srcp); 163 196 if (next < nr_cpu_ids) 164 197 __this_cpu_write(distribute_cpu_mask_prev, next); 165 198
-28
lib/test_bitmap.c
··· 100 100 return true; 101 101 } 102 102 103 - static bool __init 104 - __check_eq_u32_array(const char *srcfile, unsigned int line, 105 - const u32 *exp_arr, unsigned int exp_len, 106 - const u32 *arr, unsigned int len) __used; 107 - static bool __init 108 - __check_eq_u32_array(const char *srcfile, unsigned int line, 109 - const u32 *exp_arr, unsigned int exp_len, 110 - const u32 *arr, unsigned int len) 111 - { 112 - if (exp_len != len) { 113 - pr_warn("[%s:%u] array length differ: expected %u, got %u\n", 114 - srcfile, line, 115 - exp_len, len); 116 - return false; 117 - } 118 - 119 - if (memcmp(exp_arr, arr, len*sizeof(*arr))) { 120 - pr_warn("[%s:%u] array contents differ\n", srcfile, line); 121 - print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET, 122 - 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false); 123 - print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET, 124 - 32, 4, arr, len*sizeof(*arr), false); 125 - return false; 126 - } 127 - 128 - return true; 129 - } 130 - 131 103 static bool __init __check_eq_clump8(const char *srcfile, unsigned int line, 132 104 const unsigned int offset, 133 105 const unsigned int size,
+1
rust/bindings/bindings_helper.h
··· 10 10 #include <linux/blk-mq.h> 11 11 #include <linux/blk_types.h> 12 12 #include <linux/blkdev.h> 13 + #include <linux/cpumask.h> 13 14 #include <linux/cred.h> 14 15 #include <linux/device/faux.h> 15 16 #include <linux/errname.h>
+45
rust/helpers/cpumask.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/cpumask.h> 4 + 5 + void rust_helper_cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) 6 + { 7 + cpumask_set_cpu(cpu, dstp); 8 + } 9 + 10 + void rust_helper_cpumask_clear_cpu(int cpu, struct cpumask *dstp) 11 + { 12 + cpumask_clear_cpu(cpu, dstp); 13 + } 14 + 15 + void rust_helper_cpumask_setall(struct cpumask *dstp) 16 + { 17 + cpumask_setall(dstp); 18 + } 19 + 20 + unsigned int rust_helper_cpumask_weight(struct cpumask *srcp) 21 + { 22 + return cpumask_weight(srcp); 23 + } 24 + 25 + void rust_helper_cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) 26 + { 27 + cpumask_copy(dstp, srcp); 28 + } 29 + 30 + bool rust_helper_alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 31 + { 32 + return alloc_cpumask_var(mask, flags); 33 + } 34 + 35 + bool rust_helper_zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 36 + { 37 + return zalloc_cpumask_var(mask, flags); 38 + } 39 + 40 + #ifndef CONFIG_CPUMASK_OFFSTACK 41 + void rust_helper_free_cpumask_var(cpumask_var_t mask) 42 + { 43 + free_cpumask_var(mask); 44 + } 45 + #endif
+1
rust/helpers/helpers.c
··· 11 11 #include "bug.c" 12 12 #include "build_assert.c" 13 13 #include "build_bug.c" 14 + #include "cpumask.c" 14 15 #include "cred.c" 15 16 #include "device.c" 16 17 #include "err.c"
+1 -1
tools/include/linux/bits.h
··· 41 41 * Missing asm support 42 42 * 43 43 * __GENMASK_U128() depends on _BIT128() which would not work 44 - * in the asm code, as it shifts an 'unsigned __init128' data 44 + * in the asm code, as it shifts an 'unsigned __int128' data 45 45 * type instead of direct representation of 128 bit constants 46 46 * such as long and unsigned long. The fundamental problem is 47 47 * that a 128 bit constant will get silently truncated by the
+1 -1
tools/include/uapi/linux/const.h
··· 33 33 * Missing asm support 34 34 * 35 35 * __BIT128() would not work in the asm code, as it shifts an 36 - * 'unsigned __init128' data type as direct representation of 36 + * 'unsigned __int128' data type as direct representation of 37 37 * 128 bit constants is not supported in the gcc compiler, as 38 38 * they get silently truncated. 39 39 *