Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: replace all open encodings for NUMA_NO_NODE

Patch series "Replace all open encodings for NUMA_NO_NODE", v3.

All these places for replacement were found by running the following
grep patterns on the entire kernel code. Please let me know if this
might have missed some instances. This might also have replaced some
false positives. I would appreciate suggestions, inputs, and review.

1. git grep "nid == -1"
2. git grep "node == -1"
3. git grep "nid = -1"
4. git grep "node = -1"

This patch (of 2):

At present there are multiple places where an invalid node number is
encoded as -1. Even though this is implicitly understood, it is always
better to use a macro for it. Replace these open encodings for an
invalid node number with the global macro NUMA_NO_NODE. This helps
remove NUMA-related assumptions like 'invalid node' from various
places, redirecting them to a common definition.

Link: http://lkml.kernel.org/r/1545127933-10711-2-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> [ixgbe]
Acked-by: Jens Axboe <axboe@kernel.dk> [mtip32xx]
Acked-by: Vinod Koul <vkoul@kernel.org> [dmaengine.c]
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Acked-by: Doug Ledford <dledford@redhat.com> [drivers/infiniband]
Cc: Joseph Qi <jiangqi903@gmail.com>
Cc: Hans Verkuil <hverkuil@xs4all.nl>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Anshuman Khandual and committed by
Linus Torvalds
98fa15f3 6ade2032

+104 -74
+2 -1
arch/alpha/include/asm/topology.h
··· 4 4 5 5 #include <linux/smp.h> 6 6 #include <linux/threads.h> 7 + #include <linux/numa.h> 7 8 #include <asm/machvec.h> 8 9 9 10 #ifdef CONFIG_NUMA ··· 30 29 { 31 30 int cpu; 32 31 33 - if (node == -1) 32 + if (node == NUMA_NO_NODE) 34 33 return cpu_all_mask; 35 34 36 35 cpumask_clear(&node_to_cpumask_map[node]);
+1 -1
arch/ia64/kernel/numa.c
··· 74 74 cpumask_clear(&node_to_cpu_mask[node]); 75 75 76 76 for_each_possible_early_cpu(cpu) { 77 - node = -1; 77 + node = NUMA_NO_NODE; 78 78 for (i = 0; i < NR_CPUS; ++i) 79 79 if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { 80 80 node = node_cpuid[i].nid;
+3 -3
arch/ia64/mm/discontig.c
··· 227 227 * CPUs are put into groups according to node. Walk cpu_map 228 228 * and create new groups at node boundaries. 229 229 */ 230 - prev_node = -1; 230 + prev_node = NUMA_NO_NODE; 231 231 ai->nr_groups = 0; 232 232 for (unit = 0; unit < nr_units; unit++) { 233 233 cpu = cpu_map[unit]; ··· 435 435 { 436 436 void *ptr = NULL; 437 437 u8 best = 0xff; 438 - int bestnode = -1, node, anynode = 0; 438 + int bestnode = NUMA_NO_NODE, node, anynode = 0; 439 439 440 440 for_each_online_node(node) { 441 441 if (node_isset(node, memory_less_mask)) ··· 447 447 anynode = node; 448 448 } 449 449 450 - if (bestnode == -1) 450 + if (bestnode == NUMA_NO_NODE) 451 451 bestnode = anynode; 452 452 453 453 ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
+2 -1
arch/powerpc/include/asm/pci-bridge.h
··· 10 10 #include <linux/pci.h> 11 11 #include <linux/list.h> 12 12 #include <linux/ioport.h> 13 + #include <linux/numa.h> 13 14 14 15 struct device_node; 15 16 ··· 266 265 #ifdef CONFIG_NUMA 267 266 #define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE)) 268 267 #else 269 - #define PHB_SET_NODE(PHB, NODE) ((PHB)->node = -1) 268 + #define PHB_SET_NODE(PHB, NODE) ((PHB)->node = NUMA_NO_NODE) 270 269 #endif 271 270 272 271 #endif /* CONFIG_PPC64 */
+2 -1
arch/powerpc/kernel/paca.c
··· 11 11 #include <linux/export.h> 12 12 #include <linux/memblock.h> 13 13 #include <linux/sched/task.h> 14 + #include <linux/numa.h> 14 15 15 16 #include <asm/lppaca.h> 16 17 #include <asm/paca.h> ··· 37 36 * which will put its paca in the right place. 38 37 */ 39 38 if (cpu == boot_cpuid) { 40 - nid = -1; 39 + nid = NUMA_NO_NODE; 41 40 memblock_set_bottom_up(true); 42 41 } else { 43 42 nid = early_cpu_to_node(cpu);
+2 -1
arch/powerpc/kernel/pci-common.c
··· 32 32 #include <linux/vmalloc.h> 33 33 #include <linux/slab.h> 34 34 #include <linux/vgaarb.h> 35 + #include <linux/numa.h> 35 36 36 37 #include <asm/processor.h> 37 38 #include <asm/io.h> ··· 133 132 int nid = of_node_to_nid(dev); 134 133 135 134 if (nid < 0 || !node_online(nid)) 136 - nid = -1; 135 + nid = NUMA_NO_NODE; 137 136 138 137 PHB_SET_NODE(phb, nid); 139 138 }
+7 -7
arch/powerpc/mm/numa.c
··· 215 215 */ 216 216 static int associativity_to_nid(const __be32 *associativity) 217 217 { 218 - int nid = -1; 218 + int nid = NUMA_NO_NODE; 219 219 220 220 if (min_common_depth == -1) 221 221 goto out; ··· 225 225 226 226 /* POWER4 LPAR uses 0xffff as invalid node */ 227 227 if (nid == 0xffff || nid >= MAX_NUMNODES) 228 - nid = -1; 228 + nid = NUMA_NO_NODE; 229 229 230 230 if (nid > 0 && 231 231 of_read_number(associativity, 1) >= distance_ref_points_depth) { ··· 244 244 */ 245 245 static int of_node_to_nid_single(struct device_node *device) 246 246 { 247 - int nid = -1; 247 + int nid = NUMA_NO_NODE; 248 248 const __be32 *tmp; 249 249 250 250 tmp = of_get_associativity(device); ··· 256 256 /* Walk the device tree upwards, looking for an associativity id */ 257 257 int of_node_to_nid(struct device_node *device) 258 258 { 259 - int nid = -1; 259 + int nid = NUMA_NO_NODE; 260 260 261 261 of_node_get(device); 262 262 while (device) { ··· 454 454 */ 455 455 static int numa_setup_cpu(unsigned long lcpu) 456 456 { 457 - int nid = -1; 457 + int nid = NUMA_NO_NODE; 458 458 struct device_node *cpu; 459 459 460 460 /* ··· 930 930 { 931 931 struct drmem_lmb *lmb; 932 932 unsigned long lmb_size; 933 - int nid = -1; 933 + int nid = NUMA_NO_NODE; 934 934 935 935 lmb_size = drmem_lmb_size(); 936 936 ··· 960 960 static int hot_add_node_scn_to_nid(unsigned long scn_addr) 961 961 { 962 962 struct device_node *memory; 963 - int nid = -1; 963 + int nid = NUMA_NO_NODE; 964 964 965 965 for_each_node_by_type(memory, "memory") { 966 966 unsigned long start, size;
+3 -2
arch/powerpc/platforms/powernv/memtrace.c
··· 20 20 #include <linux/slab.h> 21 21 #include <linux/memory.h> 22 22 #include <linux/memory_hotplug.h> 23 + #include <linux/numa.h> 23 24 #include <asm/machdep.h> 24 25 #include <asm/debugfs.h> 25 26 ··· 224 223 ent = &memtrace_array[i]; 225 224 226 225 /* We have onlined this chunk previously */ 227 - if (ent->nid == -1) 226 + if (ent->nid == NUMA_NO_NODE) 228 227 continue; 229 228 230 229 /* Remove from io mappings */ ··· 258 257 */ 259 258 debugfs_remove_recursive(ent->dir); 260 259 pr_info("Added trace memory back to node %d\n", ent->nid); 261 - ent->size = ent->start = ent->nid = -1; 260 + ent->size = ent->start = ent->nid = NUMA_NO_NODE; 262 261 } 263 262 if (ret) 264 263 return ret;
+2 -1
arch/sparc/kernel/pci_fire.c
··· 11 11 #include <linux/export.h> 12 12 #include <linux/irq.h> 13 13 #include <linux/of_device.h> 14 + #include <linux/numa.h> 14 15 15 16 #include <asm/prom.h> 16 17 #include <asm/irq.h> ··· 417 416 struct device_node *dp = op->dev.of_node; 418 417 int err; 419 418 420 - pbm->numa_node = -1; 419 + pbm->numa_node = NUMA_NO_NODE; 421 420 422 421 pbm->pci_ops = &sun4u_pci_ops; 423 422 pbm->config_space_reg_bits = 12;
+2 -1
arch/sparc/kernel/pci_schizo.c
··· 12 12 #include <linux/export.h> 13 13 #include <linux/interrupt.h> 14 14 #include <linux/of_device.h> 15 + #include <linux/numa.h> 15 16 16 17 #include <asm/iommu.h> 17 18 #include <asm/irq.h> ··· 1348 1347 pbm->next = pci_pbm_root; 1349 1348 pci_pbm_root = pbm; 1350 1349 1351 - pbm->numa_node = -1; 1350 + pbm->numa_node = NUMA_NO_NODE; 1352 1351 1353 1352 pbm->pci_ops = &sun4u_pci_ops; 1354 1353 pbm->config_space_reg_bits = 8;
+2 -1
arch/sparc/kernel/psycho_common.c
··· 5 5 */ 6 6 #include <linux/kernel.h> 7 7 #include <linux/interrupt.h> 8 + #include <linux/numa.h> 8 9 9 10 #include <asm/upa.h> 10 11 ··· 455 454 struct device_node *dp = op->dev.of_node; 456 455 457 456 pbm->name = dp->full_name; 458 - pbm->numa_node = -1; 457 + pbm->numa_node = NUMA_NO_NODE; 459 458 pbm->chip_type = chip_type; 460 459 pbm->chip_version = of_getintprop_default(dp, "version#", 0); 461 460 pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
+2 -1
arch/sparc/kernel/sbus.c
··· 15 15 #include <linux/interrupt.h> 16 16 #include <linux/of.h> 17 17 #include <linux/of_device.h> 18 + #include <linux/numa.h> 18 19 19 20 #include <asm/page.h> 20 21 #include <asm/io.h> ··· 562 561 563 562 op->dev.archdata.iommu = iommu; 564 563 op->dev.archdata.stc = strbuf; 565 - op->dev.archdata.numa_node = -1; 564 + op->dev.archdata.numa_node = NUMA_NO_NODE; 566 565 567 566 reg_base = regs + SYSIO_IOMMUREG_BASE; 568 567 iommu->iommu_control = reg_base + IOMMU_CONTROL;
+3 -3
arch/sparc/mm/init_64.c
··· 976 976 { 977 977 int prev_nid, new_nid; 978 978 979 - prev_nid = -1; 979 + prev_nid = NUMA_NO_NODE; 980 980 for ( ; start < end; start += PAGE_SIZE) { 981 981 for (new_nid = 0; new_nid < num_node_masks; new_nid++) { 982 982 struct node_mem_mask *p = &node_masks[new_nid]; 983 983 984 984 if ((start & p->mask) == p->match) { 985 - if (prev_nid == -1) 985 + if (prev_nid == NUMA_NO_NODE) 986 986 prev_nid = new_nid; 987 987 break; 988 988 } ··· 1208 1208 md = mdesc_grab(); 1209 1209 1210 1210 count = 0; 1211 - nid = -1; 1211 + nid = NUMA_NO_NODE; 1212 1212 mdesc_for_each_node_by_name(md, grp, "group") { 1213 1213 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { 1214 1214 nid = count;
+2 -1
arch/x86/include/asm/pci.h
··· 7 7 #include <linux/slab.h> 8 8 #include <linux/string.h> 9 9 #include <linux/scatterlist.h> 10 + #include <linux/numa.h> 10 11 #include <asm/io.h> 11 12 #include <asm/pat.h> 12 13 #include <asm/x86_init.h> ··· 142 141 int node; 143 142 144 143 node = __pcibus_to_node(bus); 145 - return (node == -1) ? cpu_online_mask : 144 + return (node == NUMA_NO_NODE) ? cpu_online_mask : 146 145 cpumask_of_node(node); 147 146 } 148 147 #endif
+4 -3
arch/x86/kernel/apic/x2apic_uv_x.c
··· 27 27 #include <linux/crash_dump.h> 28 28 #include <linux/reboot.h> 29 29 #include <linux/memory.h> 30 + #include <linux/numa.h> 30 31 31 32 #include <asm/uv/uv_mmrs.h> 32 33 #include <asm/uv/uv_hub.h> ··· 1391 1390 } 1392 1391 1393 1392 /* Set socket -> node values: */ 1394 - lnid = -1; 1393 + lnid = NUMA_NO_NODE; 1395 1394 for_each_present_cpu(cpu) { 1396 1395 int nid = cpu_to_node(cpu); 1397 1396 int apicid, sockid; ··· 1522 1521 new_hub->pnode = 0xffff; 1523 1522 1524 1523 new_hub->numa_blade_id = uv_node_to_blade_id(nodeid); 1525 - new_hub->memory_nid = -1; 1524 + new_hub->memory_nid = NUMA_NO_NODE; 1526 1525 new_hub->nr_possible_cpus = 0; 1527 1526 new_hub->nr_online_cpus = 0; 1528 1527 } ··· 1539 1538 1540 1539 uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid); 1541 1540 uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++; 1542 - if (uv_cpu_hub_info(cpu)->memory_nid == -1) 1541 + if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE) 1543 1542 uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu); 1544 1543 1545 1544 /* Init memoryless node: */
+2 -1
arch/x86/kernel/smpboot.c
··· 56 56 #include <linux/stackprotector.h> 57 57 #include <linux/gfp.h> 58 58 #include <linux/cpuidle.h> 59 + #include <linux/numa.h> 59 60 60 61 #include <asm/acpi.h> 61 62 #include <asm/desc.h> ··· 842 841 /* reduce the number of lines printed when booting a large cpu count system */ 843 842 static void announce_cpu(int cpu, int apicid) 844 843 { 845 - static int current_node = -1; 844 + static int current_node = NUMA_NO_NODE; 846 845 int node = early_cpu_to_node(cpu); 847 846 static int width, node_width; 848 847
+3 -2
drivers/block/mtip32xx/mtip32xx.c
··· 40 40 #include <linux/export.h> 41 41 #include <linux/debugfs.h> 42 42 #include <linux/prefetch.h> 43 + #include <linux/numa.h> 43 44 #include "mtip32xx.h" 44 45 45 46 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) ··· 4019 4018 /* Helper for selecting a node in round robin mode */ 4020 4019 static inline int mtip_get_next_rr_node(void) 4021 4020 { 4022 - static int next_node = -1; 4021 + static int next_node = NUMA_NO_NODE; 4023 4022 4024 - if (next_node == -1) { 4023 + if (next_node == NUMA_NO_NODE) { 4025 4024 next_node = first_online_node; 4026 4025 return next_node; 4027 4026 }
+3 -1
drivers/dma/dmaengine.c
··· 63 63 #include <linux/acpi_dma.h> 64 64 #include <linux/of_dma.h> 65 65 #include <linux/mempool.h> 66 + #include <linux/numa.h> 66 67 67 68 static DEFINE_MUTEX(dma_list_mutex); 68 69 static DEFINE_IDA(dma_ida); ··· 387 386 static bool dma_chan_is_local(struct dma_chan *chan, int cpu) 388 387 { 389 388 int node = dev_to_node(chan->device->dev); 390 - return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node)); 389 + return node == NUMA_NO_NODE || 390 + cpumask_test_cpu(cpu, cpumask_of_node(node)); 391 391 } 392 392 393 393 /**
+2 -1
drivers/infiniband/hw/hfi1/affinity.c
··· 48 48 #include <linux/cpumask.h> 49 49 #include <linux/module.h> 50 50 #include <linux/interrupt.h> 51 + #include <linux/numa.h> 51 52 52 53 #include "hfi.h" 53 54 #include "affinity.h" ··· 778 777 _dev_comp_vect_cpu_mask_clean_up(dd, entry); 779 778 unlock: 780 779 mutex_unlock(&node_affinity.lock); 781 - dd->node = -1; 780 + dd->node = NUMA_NO_NODE; 782 781 } 783 782 784 783 /*
+2 -1
drivers/infiniband/hw/hfi1/init.c
··· 54 54 #include <linux/printk.h> 55 55 #include <linux/hrtimer.h> 56 56 #include <linux/bitmap.h> 57 + #include <linux/numa.h> 57 58 #include <rdma/rdma_vt.h> 58 59 59 60 #include "hfi.h" ··· 1304 1303 dd->unit = ret; 1305 1304 list_add(&dd->list, &hfi1_dev_list); 1306 1305 } 1307 - dd->node = -1; 1306 + dd->node = NUMA_NO_NODE; 1308 1307 1309 1308 spin_unlock_irqrestore(&hfi1_devs_lock, flags); 1310 1309 idr_preload_end();
+3 -2
drivers/iommu/dmar.c
··· 39 39 #include <linux/dmi.h> 40 40 #include <linux/slab.h> 41 41 #include <linux/iommu.h> 42 + #include <linux/numa.h> 42 43 #include <asm/irq_remapping.h> 43 44 #include <asm/iommu_table.h> 44 45 ··· 478 477 int node = acpi_map_pxm_to_node(rhsa->proximity_domain); 479 478 480 479 if (!node_online(node)) 481 - node = -1; 480 + node = NUMA_NO_NODE; 482 481 drhd->iommu->node = node; 483 482 return 0; 484 483 } ··· 1063 1062 iommu->msagaw = msagaw; 1064 1063 iommu->segment = drhd->segment; 1065 1064 1066 - iommu->node = -1; 1065 + iommu->node = NUMA_NO_NODE; 1067 1066 1068 1067 ver = readl(iommu->reg + DMAR_VER_REG); 1069 1068 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+2 -1
drivers/iommu/intel-iommu.c
··· 47 47 #include <linux/dma-contiguous.h> 48 48 #include <linux/dma-direct.h> 49 49 #include <linux/crash_dump.h> 50 + #include <linux/numa.h> 50 51 #include <asm/irq_remapping.h> 51 52 #include <asm/cacheflush.h> 52 53 #include <asm/iommu.h> ··· 1717 1716 return NULL; 1718 1717 1719 1718 memset(domain, 0, sizeof(*domain)); 1720 - domain->nid = -1; 1719 + domain->nid = NUMA_NO_NODE; 1721 1720 domain->flags = flags; 1722 1721 domain->has_iotlb_device = false; 1723 1722 INIT_LIST_HEAD(&domain->devices);
+2 -1
drivers/misc/sgi-xp/xpc_uv.c
··· 22 22 #include <linux/module.h> 23 23 #include <linux/err.h> 24 24 #include <linux/slab.h> 25 + #include <linux/numa.h> 25 26 #include <asm/uv/uv_hub.h> 26 27 #if defined CONFIG_X86_64 27 28 #include <asm/uv/bios.h> ··· 62 61 XPC_NOTIFY_MSG_SIZE_UV) 63 62 #define XPC_NOTIFY_IRQ_NAME "xpc_notify" 64 63 65 - static int xpc_mq_node = -1; 64 + static int xpc_mq_node = NUMA_NO_NODE; 66 65 67 66 static struct xpc_gru_mq_uv *xpc_activate_mq_uv; 68 67 static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
+3 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 27 27 #include <linux/bpf.h> 28 28 #include <linux/bpf_trace.h> 29 29 #include <linux/atomic.h> 30 + #include <linux/numa.h> 30 31 #include <scsi/fc/fc_fcoe.h> 31 32 #include <net/udp_tunnel.h> 32 33 #include <net/pkt_cls.h> ··· 6419 6418 { 6420 6419 struct device *dev = tx_ring->dev; 6421 6420 int orig_node = dev_to_node(dev); 6422 - int ring_node = -1; 6421 + int ring_node = NUMA_NO_NODE; 6423 6422 int size; 6424 6423 6425 6424 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; ··· 6513 6512 { 6514 6513 struct device *dev = rx_ring->dev; 6515 6514 int orig_node = dev_to_node(dev); 6516 - int ring_node = -1; 6515 + int ring_node = NUMA_NO_NODE; 6517 6516 int size; 6518 6517 6519 6518 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+1 -1
include/linux/device.h
··· 1095 1095 #else 1096 1096 static inline int dev_to_node(struct device *dev) 1097 1097 { 1098 - return -1; 1098 + return NUMA_NO_NODE; 1099 1099 } 1100 1100 static inline void set_dev_node(struct device *dev, int node) 1101 1101 {
+2 -1
init/init_task.c
··· 10 10 #include <linux/fs.h> 11 11 #include <linux/mm.h> 12 12 #include <linux/audit.h> 13 + #include <linux/numa.h> 13 14 14 15 #include <asm/pgtable.h> 15 16 #include <linux/uaccess.h> ··· 155 154 .vtime.state = VTIME_SYS, 156 155 #endif 157 156 #ifdef CONFIG_NUMA_BALANCING 158 - .numa_preferred_nid = -1, 157 + .numa_preferred_nid = NUMA_NO_NODE, 159 158 .numa_group = NULL, 160 159 .numa_faults = NULL, 161 160 #endif
+2 -1
kernel/kthread.c
··· 20 20 #include <linux/freezer.h> 21 21 #include <linux/ptrace.h> 22 22 #include <linux/uaccess.h> 23 + #include <linux/numa.h> 23 24 #include <trace/events/sched.h> 24 25 25 26 static DEFINE_SPINLOCK(kthread_create_lock); ··· 676 675 { 677 676 struct kthread_worker *worker; 678 677 struct task_struct *task; 679 - int node = -1; 678 + int node = NUMA_NO_NODE; 680 679 681 680 worker = kzalloc(sizeof(*worker), GFP_KERNEL); 682 681 if (!worker)
+8 -7
kernel/sched/fair.c
··· 1160 1160 1161 1161 /* New address space, reset the preferred nid */ 1162 1162 if (!(clone_flags & CLONE_VM)) { 1163 - p->numa_preferred_nid = -1; 1163 + p->numa_preferred_nid = NUMA_NO_NODE; 1164 1164 return; 1165 1165 } 1166 1166 ··· 1180 1180 1181 1181 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) 1182 1182 { 1183 - rq->nr_numa_running += (p->numa_preferred_nid != -1); 1183 + rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); 1184 1184 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); 1185 1185 } 1186 1186 1187 1187 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) 1188 1188 { 1189 - rq->nr_numa_running -= (p->numa_preferred_nid != -1); 1189 + rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); 1190 1190 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); 1191 1191 } 1192 1192 ··· 1400 1400 * two full passes of the "multi-stage node selection" test that is 1401 1401 * executed below. 1402 1402 */ 1403 - if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) && 1403 + if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && 1404 1404 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) 1405 1405 return true; 1406 1406 ··· 1848 1848 unsigned long interval = HZ; 1849 1849 1850 1850 /* This task has no NUMA fault statistics yet */ 1851 - if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) 1851 + if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) 1852 1852 return; 1853 1853 1854 1854 /* Periodically retry migrating the task to the preferred node */ ··· 2095 2095 2096 2096 static void task_numa_placement(struct task_struct *p) 2097 2097 { 2098 - int seq, nid, max_nid = -1; 2098 + int seq, nid, max_nid = NUMA_NO_NODE; 2099 2099 unsigned long max_faults = 0; 2100 2100 unsigned long fault_types[2] = { 0, 0 }; 2101 2101 unsigned long total_faults; ··· 2638 2638 * the preferred node. 
2639 2639 */ 2640 2640 if (dst_nid == p->numa_preferred_nid || 2641 - (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid)) 2641 + (p->numa_preferred_nid != NUMA_NO_NODE && 2642 + src_nid != p->numa_preferred_nid)) 2642 2643 return; 2643 2644 } 2644 2645
+2 -1
lib/cpumask.c
··· 5 5 #include <linux/cpumask.h> 6 6 #include <linux/export.h> 7 7 #include <linux/memblock.h> 8 + #include <linux/numa.h> 8 9 9 10 /** 10 11 * cpumask_next - get the next cpu in a cpumask ··· 207 206 /* Wrap: we always want a cpu. */ 208 207 i %= num_online_cpus(); 209 208 210 - if (node == -1) { 209 + if (node == NUMA_NO_NODE) { 211 210 for_each_cpu(cpu, cpu_online_mask) 212 211 if (i-- == 0) 213 212 return cpu;
+7 -6
mm/huge_memory.c
··· 33 33 #include <linux/page_idle.h> 34 34 #include <linux/shmem_fs.h> 35 35 #include <linux/oom.h> 36 + #include <linux/numa.h> 36 37 37 38 #include <asm/tlb.h> 38 39 #include <asm/pgalloc.h> ··· 1476 1475 struct anon_vma *anon_vma = NULL; 1477 1476 struct page *page; 1478 1477 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 1479 - int page_nid = -1, this_nid = numa_node_id(); 1478 + int page_nid = NUMA_NO_NODE, this_nid = numa_node_id(); 1480 1479 int target_nid, last_cpupid = -1; 1481 1480 bool page_locked; 1482 1481 bool migrated = false; ··· 1521 1520 */ 1522 1521 page_locked = trylock_page(page); 1523 1522 target_nid = mpol_misplaced(page, vma, haddr); 1524 - if (target_nid == -1) { 1523 + if (target_nid == NUMA_NO_NODE) { 1525 1524 /* If the page was locked, there are no parallel migrations */ 1526 1525 if (page_locked) 1527 1526 goto clear_pmdnuma; ··· 1529 1528 1530 1529 /* Migration could have started since the pmd_trans_migrating check */ 1531 1530 if (!page_locked) { 1532 - page_nid = -1; 1531 + page_nid = NUMA_NO_NODE; 1533 1532 if (!get_page_unless_zero(page)) 1534 1533 goto out_unlock; 1535 1534 spin_unlock(vmf->ptl); ··· 1550 1549 if (unlikely(!pmd_same(pmd, *vmf->pmd))) { 1551 1550 unlock_page(page); 1552 1551 put_page(page); 1553 - page_nid = -1; 1552 + page_nid = NUMA_NO_NODE; 1554 1553 goto out_unlock; 1555 1554 } 1556 1555 1557 1556 /* Bail if we fail to protect against THP splits for any reason */ 1558 1557 if (unlikely(!anon_vma)) { 1559 1558 put_page(page); 1560 - page_nid = -1; 1559 + page_nid = NUMA_NO_NODE; 1561 1560 goto clear_pmdnuma; 1562 1561 } 1563 1562 ··· 1619 1618 if (anon_vma) 1620 1619 page_unlock_anon_vma_read(anon_vma); 1621 1620 1622 - if (page_nid != -1) 1621 + if (page_nid != NUMA_NO_NODE) 1623 1622 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, 1624 1623 flags); 1625 1624
+2 -1
mm/hugetlb.c
··· 25 25 #include <linux/swap.h> 26 26 #include <linux/swapops.h> 27 27 #include <linux/jhash.h> 28 + #include <linux/numa.h> 28 29 29 30 #include <asm/page.h> 30 31 #include <asm/pgtable.h> ··· 888 887 struct zonelist *zonelist; 889 888 struct zone *zone; 890 889 struct zoneref *z; 891 - int node = -1; 890 + int node = NUMA_NO_NODE; 892 891 893 892 zonelist = node_zonelist(nid, gfp_mask); 894 893
+1 -1
mm/ksm.c
··· 598 598 chain->chain_prune_time = jiffies; 599 599 chain->rmap_hlist_len = STABLE_NODE_CHAIN; 600 600 #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA) 601 - chain->nid = -1; /* debug */ 601 + chain->nid = NUMA_NO_NODE; /* debug */ 602 602 #endif 603 603 ksm_stable_node_chains++; 604 604
+4 -3
mm/memory.c
··· 69 69 #include <linux/userfaultfd_k.h> 70 70 #include <linux/dax.h> 71 71 #include <linux/oom.h> 72 + #include <linux/numa.h> 72 73 73 74 #include <asm/io.h> 74 75 #include <asm/mmu_context.h> ··· 3587 3586 { 3588 3587 struct vm_area_struct *vma = vmf->vma; 3589 3588 struct page *page = NULL; 3590 - int page_nid = -1; 3589 + int page_nid = NUMA_NO_NODE; 3591 3590 int last_cpupid; 3592 3591 int target_nid; 3593 3592 bool migrated = false; ··· 3654 3653 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, 3655 3654 &flags); 3656 3655 pte_unmap_unlock(vmf->pte, vmf->ptl); 3657 - if (target_nid == -1) { 3656 + if (target_nid == NUMA_NO_NODE) { 3658 3657 put_page(page); 3659 3658 goto out; 3660 3659 } ··· 3668 3667 flags |= TNF_MIGRATE_FAIL; 3669 3668 3670 3669 out: 3671 - if (page_nid != -1) 3670 + if (page_nid != NUMA_NO_NODE) 3672 3671 task_numa_fault(last_cpupid, page_nid, 1, flags); 3673 3672 return 0; 3674 3673 }
+6 -6
mm/memory_hotplug.c
··· 702 702 { 703 703 int nid = zone_to_nid(zone); 704 704 705 - arg->status_change_nid = -1; 706 - arg->status_change_nid_normal = -1; 707 - arg->status_change_nid_high = -1; 705 + arg->status_change_nid = NUMA_NO_NODE; 706 + arg->status_change_nid_normal = NUMA_NO_NODE; 707 + arg->status_change_nid_high = NUMA_NO_NODE; 708 708 709 709 if (!node_state(nid, N_MEMORY)) 710 710 arg->status_change_nid = nid; ··· 1509 1509 unsigned long present_pages = 0; 1510 1510 enum zone_type zt; 1511 1511 1512 - arg->status_change_nid = -1; 1513 - arg->status_change_nid_normal = -1; 1514 - arg->status_change_nid_high = -1; 1512 + arg->status_change_nid = NUMA_NO_NODE; 1513 + arg->status_change_nid_normal = NUMA_NO_NODE; 1514 + arg->status_change_nid_high = NUMA_NO_NODE; 1515 1515 1516 1516 /* 1517 1517 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
+1 -1
mm/mempolicy.c
··· 2304 2304 unsigned long pgoff; 2305 2305 int thiscpu = raw_smp_processor_id(); 2306 2306 int thisnid = cpu_to_node(thiscpu); 2307 - int polnid = -1; 2307 + int polnid = NUMA_NO_NODE; 2308 2308 int ret = -1; 2309 2309 2310 2310 pol = get_vma_policy(vma, addr);
+2 -2
mm/page_alloc.c
··· 6016 6016 return state->last_nid; 6017 6017 6018 6018 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 6019 - if (nid != -1) { 6019 + if (nid != NUMA_NO_NODE) { 6020 6020 state->last_start = start_pfn; 6021 6021 state->last_end = end_pfn; 6022 6022 state->last_nid = nid; ··· 6771 6771 { 6772 6772 unsigned long accl_mask = 0, last_end = 0; 6773 6773 unsigned long start, end, mask; 6774 - int last_nid = -1; 6774 + int last_nid = NUMA_NO_NODE; 6775 6775 int i, nid; 6776 6776 6777 6777 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
+1 -1
mm/page_ext.c
··· 300 300 start = SECTION_ALIGN_DOWN(start_pfn); 301 301 end = SECTION_ALIGN_UP(start_pfn + nr_pages); 302 302 303 - if (nid == -1) { 303 + if (nid == NUMA_NO_NODE) { 304 304 /* 305 305 * In this case, "nid" already exists and contains valid memory. 306 306 * "start_pfn" passed to us is a pfn which is an arg for
+2 -1
net/core/pktgen.c
··· 158 158 #include <linux/etherdevice.h> 159 159 #include <linux/kthread.h> 160 160 #include <linux/prefetch.h> 161 + #include <linux/mmzone.h> 161 162 #include <net/net_namespace.h> 162 163 #include <net/checksum.h> 163 164 #include <net/ipv6.h> ··· 3626 3625 pkt_dev->svlan_cfi = 0; 3627 3626 pkt_dev->svlan_id = 0xffff; 3628 3627 pkt_dev->burst = 1; 3629 - pkt_dev->node = -1; 3628 + pkt_dev->node = NUMA_NO_NODE; 3630 3629 3631 3630 err = pktgen_setup_dev(t->net, pkt_dev, ifname); 3632 3631 if (err)
+2 -1
net/qrtr/qrtr.c
··· 15 15 #include <linux/netlink.h> 16 16 #include <linux/qrtr.h> 17 17 #include <linux/termios.h> /* For TIOCINQ/OUTQ */ 18 + #include <linux/numa.h> 18 19 19 20 #include <net/sock.h> 20 21 ··· 102 101 return container_of(sk, struct qrtr_sock, sk); 103 102 } 104 103 105 - static unsigned int qrtr_local_nid = -1; 104 + static unsigned int qrtr_local_nid = NUMA_NO_NODE; 106 105 107 106 /* for node ids */ 108 107 static RADIX_TREE(qrtr_nodes, GFP_KERNEL);