Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/percpu.c: optimize the code in pcpu_setup_first_chunk() a little bit

This removes the need for the local variable 'chunk', and simplifies the
code that calls pcpu_alloc_first_chunk() to initialize the reserved
chunk and the dynamic chunk.

Signed-off-by: Baoquan He <bhe@redhat.com>
[Dennis: reworded first chunk init comment]
Signed-off-by: Dennis Zhou <dennis@kernel.org>

authored by

Baoquan He and committed by
Dennis Zhou
7ee1e758 5b672085

+15 -23
+15 -23
mm/percpu.c
··· 2581 2581 { 2582 2582 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2583 2583 size_t static_size, dyn_size; 2584 - struct pcpu_chunk *chunk; 2585 2584 unsigned long *group_offsets; 2586 2585 size_t *group_sizes; 2587 2586 unsigned long *unit_off; 2588 2587 unsigned int cpu; 2589 2588 int *unit_map; 2590 2589 int group, unit, i; 2591 - int map_size; 2592 2590 unsigned long tmp_addr; 2593 2591 size_t alloc_size; 2594 2592 ··· 2695 2697 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2696 2698 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 2697 2699 pcpu_atom_size = ai->atom_size; 2698 - pcpu_chunk_struct_size = struct_size(chunk, populated, 2700 + pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated, 2699 2701 BITS_TO_LONGS(pcpu_unit_pages)); 2700 2702 2701 2703 pcpu_stats_save_ai(ai); ··· 2732 2734 dyn_size = ai->dyn_size - (static_size - ai->static_size); 2733 2735 2734 2736 /* 2735 - * Initialize first chunk. 2736 - * If the reserved_size is non-zero, this initializes the reserved 2737 - * chunk. If the reserved_size is zero, the reserved chunk is NULL 2738 - * and the dynamic region is initialized here. The first chunk, 2739 - * pcpu_first_chunk, will always point to the chunk that serves 2740 - * the dynamic region. 2737 + * Initialize first chunk: 2738 + * This chunk is broken up into 3 parts: 2739 + * < static | [reserved] | dynamic > 2740 + * - static - there is no backing chunk because these allocations can 2741 + * never be freed. 2742 + * - reserved (pcpu_reserved_chunk) - exists primarily to serve 2743 + * allocations from module load. 2744 + * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first 2745 + * chunk. 
2741 2746 */ 2742 2747 tmp_addr = (unsigned long)base_addr + static_size; 2743 - map_size = ai->reserved_size ?: dyn_size; 2744 - chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2748 + if (ai->reserved_size) 2749 + pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr, 2750 + ai->reserved_size); 2751 + tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size; 2752 + pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size); 2745 2753 2746 - /* init dynamic chunk if necessary */ 2747 - if (ai->reserved_size) { 2748 - pcpu_reserved_chunk = chunk; 2749 - 2750 - tmp_addr = (unsigned long)base_addr + static_size + 2751 - ai->reserved_size; 2752 - map_size = dyn_size; 2753 - chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2754 - } 2755 - 2756 - /* link the first chunk in */ 2757 - pcpu_first_chunk = chunk; 2758 2754 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2759 2755 pcpu_chunk_relocate(pcpu_first_chunk, -1); 2760 2756