Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm vdo wait-queue: add proper namespace to interface

Rename various interfaces and structs associated with vdo's wait-queue,
e.g.: s/wait_queue/vdo_wait_queue/, s/waiter/vdo_waiter/, etc.

Now all function names start with "vdo_waitq_" or "vdo_waiter_".

Reviewed-by: Ken Raeburn <raeburn@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>

+413 -389
+70 -64
drivers/md/dm-vdo/block-map.c
··· 85 85 struct cursors; 86 86 87 87 struct cursor { 88 - struct waiter waiter; 88 + struct vdo_waiter waiter; 89 89 struct block_map_tree *tree; 90 90 height_t height; 91 91 struct cursors *parent; ··· 162 162 return &cache->pages[(info - cache->infos) * VDO_BLOCK_SIZE]; 163 163 } 164 164 165 - static inline struct vdo_page_completion *page_completion_from_waiter(struct waiter *waiter) 165 + static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo_waiter *waiter) 166 166 { 167 167 struct vdo_page_completion *completion; 168 168 ··· 407 407 if (result != UDS_SUCCESS) 408 408 return result; 409 409 410 - result = ASSERT(!vdo_has_waiters(&info->waiting), 410 + result = ASSERT(!vdo_waitq_has_waiters(&info->waiting), 411 411 "VDO Page must not have waiters"); 412 412 if (result != UDS_SUCCESS) 413 413 return result; ··· 506 506 * 507 507 * Implements waiter_callback_fn. 508 508 */ 509 - static void complete_waiter_with_error(struct waiter *waiter, void *result_ptr) 509 + static void complete_waiter_with_error(struct vdo_waiter *waiter, void *result_ptr) 510 510 { 511 511 int *result = result_ptr; 512 512 ··· 520 520 * 521 521 * Implements waiter_callback_fn. 522 522 */ 523 - static void complete_waiter_with_page(struct waiter *waiter, void *page_info) 523 + static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info) 524 524 { 525 525 complete_with_page(page_info, page_completion_from_waiter(waiter)); 526 526 } 527 527 528 528 /** 529 - * distribute_page_over_queue() - Complete a queue of VDO page completions with a page result. 529 + * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result. 530 530 * 531 - * Upon completion the queue will be empty. 531 + * Upon completion the waitq will be empty. 532 532 * 533 533 * Return: The number of pages distributed. 
534 534 */ 535 - static unsigned int distribute_page_over_queue(struct page_info *info, 536 - struct wait_queue *queue) 535 + static unsigned int distribute_page_over_waitq(struct page_info *info, 536 + struct vdo_wait_queue *waitq) 537 537 { 538 538 size_t pages; 539 539 540 540 update_lru(info); 541 - pages = vdo_count_waiters(queue); 541 + pages = vdo_waitq_num_waiters(waitq); 542 542 543 543 /* 544 544 * Increment the busy count once for each pending completion so that this page does not ··· 546 546 */ 547 547 info->busy += pages; 548 548 549 - vdo_notify_all_waiters(queue, complete_waiter_with_page, info); 549 + vdo_waitq_notify_all_waiters(waitq, complete_waiter_with_page, info); 550 550 return pages; 551 551 } 552 552 ··· 572 572 573 573 assert_on_cache_thread(cache, __func__); 574 574 575 - vdo_notify_all_waiters(&cache->free_waiters, complete_waiter_with_error, 576 - &result); 575 + vdo_waitq_notify_all_waiters(&cache->free_waiters, 576 + complete_waiter_with_error, &result); 577 577 cache->waiter_count = 0; 578 578 579 - for (info = cache->infos; info < cache->infos + cache->page_count; info++) 580 - vdo_notify_all_waiters(&info->waiting, complete_waiter_with_error, 581 - &result); 579 + for (info = cache->infos; info < cache->infos + cache->page_count; info++) { 580 + vdo_waitq_notify_all_waiters(&info->waiting, 581 + complete_waiter_with_error, &result); 582 + } 582 583 } 583 584 584 585 /** ··· 626 625 { 627 626 if (vdo_is_state_draining(&zone->state) && 628 627 (zone->active_lookups == 0) && 629 - !vdo_has_waiters(&zone->flush_waiters) && 628 + !vdo_waitq_has_waiters(&zone->flush_waiters) && 630 629 !is_vio_pool_busy(zone->vio_pool) && 631 630 (zone->page_cache.outstanding_reads == 0) && 632 631 (zone->page_cache.outstanding_writes == 0)) { ··· 644 643 * We are in read-only mode, so we won't ever write any page out. Just take all waiters off 645 644 * the queue so the zone can drain. 
646 645 */ 647 - while (vdo_has_waiters(&zone->flush_waiters)) 648 - vdo_dequeue_next_waiter(&zone->flush_waiters); 646 + while (vdo_waitq_has_waiters(&zone->flush_waiters)) 647 + vdo_waitq_dequeue_next_waiter(&zone->flush_waiters); 649 648 650 649 check_for_drain_complete(zone); 651 650 } ··· 678 677 vdo_enter_read_only_mode(cache->zone->block_map->vdo, result); 679 678 ADD_ONCE(cache->stats.failed_reads, 1); 680 679 set_info_state(info, PS_FAILED); 681 - vdo_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result); 680 + vdo_waitq_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result); 682 681 reset_page_info(info); 683 682 684 683 /* ··· 721 720 722 721 info->recovery_lock = 0; 723 722 set_info_state(info, PS_RESIDENT); 724 - distribute_page_over_queue(info, &info->waiting); 723 + distribute_page_over_waitq(info, &info->waiting); 725 724 726 725 /* 727 726 * Don't decrement until right before calling check_for_drain_complete() to ··· 875 874 * 876 875 * Return: true if the page completion is for the desired page number. 
877 876 */ 878 - static bool completion_needs_page(struct waiter *waiter, void *context) 877 + static bool completion_needs_page(struct vdo_waiter *waiter, void *context) 879 878 { 880 879 physical_block_number_t *pbn = context; 881 880 ··· 889 888 static void allocate_free_page(struct page_info *info) 890 889 { 891 890 int result; 892 - struct waiter *oldest_waiter; 891 + struct vdo_waiter *oldest_waiter; 893 892 physical_block_number_t pbn; 894 893 struct vdo_page_cache *cache = info->cache; 895 894 896 895 assert_on_cache_thread(cache, __func__); 897 896 898 - if (!vdo_has_waiters(&cache->free_waiters)) { 897 + if (!vdo_waitq_has_waiters(&cache->free_waiters)) { 899 898 if (cache->stats.cache_pressure > 0) { 900 899 uds_log_info("page cache pressure relieved"); 901 900 WRITE_ONCE(cache->stats.cache_pressure, 0); ··· 910 909 return; 911 910 } 912 911 913 - oldest_waiter = vdo_get_first_waiter(&cache->free_waiters); 912 + oldest_waiter = vdo_waitq_get_first_waiter(&cache->free_waiters); 914 913 pbn = page_completion_from_waiter(oldest_waiter)->pbn; 915 914 916 915 /* 917 916 * Remove all entries which match the page number in question and push them onto the page 918 917 * info's wait queue. 
919 918 */ 920 - vdo_dequeue_matching_waiters(&cache->free_waiters, completion_needs_page, 921 - &pbn, &info->waiting); 922 - cache->waiter_count -= vdo_count_waiters(&info->waiting); 919 + vdo_waitq_dequeue_matching_waiters(&cache->free_waiters, completion_needs_page, 920 + &pbn, &info->waiting); 921 + cache->waiter_count -= vdo_waitq_num_waiters(&info->waiting); 923 922 924 923 result = launch_page_load(info, pbn); 925 - if (result != VDO_SUCCESS) 926 - vdo_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result); 924 + if (result != VDO_SUCCESS) { 925 + vdo_waitq_notify_all_waiters(&info->waiting, 926 + complete_waiter_with_error, &result); 927 + } 927 928 } 928 929 929 930 /** ··· 969 966 struct vdo_page_cache *cache = vdo_page_comp->cache; 970 967 971 968 cache->waiter_count++; 972 - vdo_enqueue_waiter(&cache->free_waiters, &vdo_page_comp->waiter); 969 + vdo_waitq_enqueue_waiter(&cache->free_waiters, &vdo_page_comp->waiter); 973 970 discard_a_page(cache); 974 971 } 975 972 ··· 1072 1069 cache->zone->zone_number); 1073 1070 info->recovery_lock = 0; 1074 1071 was_discard = write_has_finished(info); 1075 - reclaimed = (!was_discard || (info->busy > 0) || vdo_has_waiters(&info->waiting)); 1072 + reclaimed = (!was_discard || (info->busy > 0) || vdo_waitq_has_waiters(&info->waiting)); 1076 1073 1077 1074 set_info_state(info, PS_RESIDENT); 1078 1075 1079 - reclamations = distribute_page_over_queue(info, &info->waiting); 1076 + reclamations = distribute_page_over_waitq(info, &info->waiting); 1080 1077 ADD_ONCE(cache->stats.reclaimed, reclamations); 1081 1078 1082 1079 if (was_discard) ··· 1190 1187 { 1191 1188 int result; 1192 1189 1193 - vdo_enqueue_waiter(&info->waiting, &vdo_page_comp->waiter); 1190 + vdo_waitq_enqueue_waiter(&info->waiting, &vdo_page_comp->waiter); 1194 1191 result = launch_page_load(info, vdo_page_comp->pbn); 1195 - if (result != VDO_SUCCESS) 1196 - vdo_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result); 1192 + 
if (result != VDO_SUCCESS) { 1193 + vdo_waitq_notify_all_waiters(&info->waiting, 1194 + complete_waiter_with_error, &result); 1195 + } 1197 1196 } 1198 1197 1199 1198 /** ··· 1256 1251 (is_outgoing(info) && page_completion->writable)) { 1257 1252 /* The page is unusable until it has finished I/O. */ 1258 1253 ADD_ONCE(cache->stats.wait_for_page, 1); 1259 - vdo_enqueue_waiter(&info->waiting, &page_completion->waiter); 1254 + vdo_waitq_enqueue_waiter(&info->waiting, &page_completion->waiter); 1260 1255 return; 1261 1256 } 1262 1257 ··· 1481 1476 { 1482 1477 u32 new_count; 1483 1478 int result; 1484 - bool decrement_old = vdo_is_waiting(&page->waiter); 1479 + bool decrement_old = vdo_waiter_is_waiting(&page->waiter); 1485 1480 u8 old_generation = page->generation; 1486 1481 1487 1482 if (decrement_old && (old_generation == new_generation)) ··· 1503 1498 static void write_page(struct tree_page *tree_page, struct pooled_vio *vio); 1504 1499 1505 1500 /* Implements waiter_callback_fn */ 1506 - static void write_page_callback(struct waiter *waiter, void *context) 1501 + static void write_page_callback(struct vdo_waiter *waiter, void *context) 1507 1502 { 1508 1503 write_page(container_of(waiter, struct tree_page, waiter), context); 1509 1504 } 1510 1505 1511 - static void acquire_vio(struct waiter *waiter, struct block_map_zone *zone) 1506 + static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone) 1512 1507 { 1513 1508 waiter->callback = write_page_callback; 1514 1509 acquire_vio_from_pool(zone->vio_pool, waiter); ··· 1535 1530 return; 1536 1531 } 1537 1532 1538 - vdo_enqueue_waiter(&zone->flush_waiters, &page->waiter); 1533 + vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter); 1539 1534 } 1540 1535 1541 - static void write_page_if_not_dirtied(struct waiter *waiter, void *context) 1536 + static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context) 1542 1537 { 1543 1538 struct tree_page *page = container_of(waiter, 
struct tree_page, waiter); 1544 1539 struct write_if_not_dirtied_context *write_context = context; ··· 1581 1576 .generation = page->writing_generation, 1582 1577 }; 1583 1578 1584 - vdo_notify_all_waiters(&zone->flush_waiters, 1585 - write_page_if_not_dirtied, &context); 1579 + vdo_waitq_notify_all_waiters(&zone->flush_waiters, 1580 + write_page_if_not_dirtied, &context); 1586 1581 if (dirty && attempt_increment(zone)) { 1587 1582 write_page(page, pooled); 1588 1583 return; ··· 1593 1588 1594 1589 if (dirty) { 1595 1590 enqueue_page(page, zone); 1596 - } else if ((zone->flusher == NULL) && vdo_has_waiters(&zone->flush_waiters) && 1591 + } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) && 1597 1592 attempt_increment(zone)) { 1598 1593 zone->flusher = 1599 - container_of(vdo_dequeue_next_waiter(&zone->flush_waiters), 1594 + container_of(vdo_waitq_dequeue_next_waiter(&zone->flush_waiters), 1600 1595 struct tree_page, waiter); 1601 1596 write_page(zone->flusher, pooled); 1602 1597 return; ··· 1729 1724 continue_data_vio_with_error(data_vio, result); 1730 1725 } 1731 1726 1732 - static void abort_lookup_for_waiter(struct waiter *waiter, void *context) 1727 + static void abort_lookup_for_waiter(struct vdo_waiter *waiter, void *context) 1733 1728 { 1734 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 1729 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 1735 1730 int result = *((int *) context); 1736 1731 1737 1732 if (!data_vio->write) { ··· 1751 1746 1752 1747 if (data_vio->tree_lock.locked) { 1753 1748 release_page_lock(data_vio, what); 1754 - vdo_notify_all_waiters(&data_vio->tree_lock.waiters, 1755 - abort_lookup_for_waiter, &result); 1749 + vdo_waitq_notify_all_waiters(&data_vio->tree_lock.waiters, 1750 + abort_lookup_for_waiter, 1751 + &result); 1756 1752 } 1757 1753 1758 1754 finish_lookup(data_vio, result); ··· 1819 1813 load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio); 1820 1814 } 1821 
1815 1822 - static void continue_load_for_waiter(struct waiter *waiter, void *context) 1816 + static void continue_load_for_waiter(struct vdo_waiter *waiter, void *context) 1823 1817 { 1824 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 1818 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 1825 1819 1826 1820 data_vio->tree_lock.height--; 1827 1821 continue_with_loaded_page(data_vio, context); ··· 1851 1845 1852 1846 /* Release our claim to the load and wake any waiters */ 1853 1847 release_page_lock(data_vio, "load"); 1854 - vdo_notify_all_waiters(&tree_lock->waiters, continue_load_for_waiter, page); 1848 + vdo_waitq_notify_all_waiters(&tree_lock->waiters, continue_load_for_waiter, page); 1855 1849 continue_with_loaded_page(data_vio, page); 1856 1850 } 1857 1851 ··· 1877 1871 data_vio->logical.zone->thread_id); 1878 1872 } 1879 1873 1880 - static void load_page(struct waiter *waiter, void *context) 1874 + static void load_page(struct vdo_waiter *waiter, void *context) 1881 1875 { 1882 1876 struct pooled_vio *pooled = context; 1883 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 1877 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 1884 1878 struct tree_lock *lock = &data_vio->tree_lock; 1885 1879 physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn; 1886 1880 ··· 1922 1916 } 1923 1917 1924 1918 /* Someone else is loading or allocating the page we need */ 1925 - vdo_enqueue_waiter(&lock_holder->waiters, &data_vio->waiter); 1919 + vdo_waitq_enqueue_waiter(&lock_holder->waiters, &data_vio->waiter); 1926 1920 return VDO_SUCCESS; 1927 1921 } 1928 1922 ··· 1954 1948 abort_lookup(data_vio, completion->result, "allocation"); 1955 1949 } 1956 1950 1957 - static void continue_allocation_for_waiter(struct waiter *waiter, void *context) 1951 + static void continue_allocation_for_waiter(struct vdo_waiter *waiter, void *context) 1958 1952 { 1959 - struct data_vio *data_vio = 
waiter_as_data_vio(waiter); 1953 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 1960 1954 struct tree_lock *tree_lock = &data_vio->tree_lock; 1961 1955 physical_block_number_t pbn = *((physical_block_number_t *) context); 1962 1956 ··· 2016 2010 2017 2011 list_del_init(&page->entry); 2018 2012 2019 - result = ASSERT(!vdo_is_waiting(&page->waiter), 2013 + result = ASSERT(!vdo_waiter_is_waiting(&page->waiter), 2020 2014 "Newly expired page not already waiting to write"); 2021 2015 if (result != VDO_SUCCESS) { 2022 2016 enter_zone_read_only_mode(zone, result); ··· 2095 2089 VDO_MAPPING_STATE_UNCOMPRESSED, 2096 2090 &tree_page->recovery_lock); 2097 2091 2098 - if (vdo_is_waiting(&tree_page->waiter)) { 2092 + if (vdo_waiter_is_waiting(&tree_page->waiter)) { 2099 2093 /* This page is waiting to be written out. */ 2100 2094 if (zone->flusher != tree_page) { 2101 2095 /* ··· 2123 2117 2124 2118 /* Release our claim to the allocation and wake any waiters */ 2125 2119 release_page_lock(data_vio, "allocation"); 2126 - vdo_notify_all_waiters(&tree_lock->waiters, continue_allocation_for_waiter, 2127 - &pbn); 2120 + vdo_waitq_notify_all_waiters(&tree_lock->waiters, 2121 + continue_allocation_for_waiter, &pbn); 2128 2122 if (tree_lock->height == 0) { 2129 2123 finish_lookup(data_vio, VDO_SUCCESS); 2130 2124 return; ··· 2330 2324 */ 2331 2325 void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone) 2332 2326 { 2333 - bool waiting = vdo_is_waiting(&page->waiter); 2327 + bool waiting = vdo_waiter_is_waiting(&page->waiter); 2334 2328 2335 2329 if (waiting && (zone->flusher == page)) 2336 2330 return; ··· 2636 2630 * 2637 2631 * Implements waiter_callback_fn. 2638 2632 */ 2639 - static void launch_cursor(struct waiter *waiter, void *context) 2633 + static void launch_cursor(struct vdo_waiter *waiter, void *context) 2640 2634 { 2641 2635 struct cursor *cursor = container_of(waiter, struct cursor, waiter); 2642 2636 struct pooled_vio *pooled = context;
+5 -5
drivers/md/dm-vdo/block-map.h
··· 68 68 /* how many VPCs waiting for free page */ 69 69 unsigned int waiter_count; 70 70 /* queue of waiters who want a free page */ 71 - struct wait_queue free_waiters; 71 + struct vdo_wait_queue free_waiters; 72 72 /* 73 73 * Statistics are only updated on the logical zone thread, but are accessed from other 74 74 * threads. ··· 129 129 /* page state */ 130 130 enum vdo_page_buffer_state state; 131 131 /* queue of completions awaiting this item */ 132 - struct wait_queue waiting; 132 + struct vdo_wait_queue waiting; 133 133 /* state linked list entry */ 134 134 struct list_head state_entry; 135 135 /* LRU entry */ ··· 153 153 /* The cache involved */ 154 154 struct vdo_page_cache *cache; 155 155 /* The waiter for the pending list */ 156 - struct waiter waiter; 156 + struct vdo_waiter waiter; 157 157 /* The absolute physical block number of the page on disk */ 158 158 physical_block_number_t pbn; 159 159 /* Whether the page may be modified */ ··· 167 167 struct forest; 168 168 169 169 struct tree_page { 170 - struct waiter waiter; 170 + struct vdo_waiter waiter; 171 171 172 172 /* Dirty list entry */ 173 173 struct list_head entry; ··· 228 228 struct vio_pool *vio_pool; 229 229 /* The tree page which has issued or will be issuing a flush */ 230 230 struct tree_page *flusher; 231 - struct wait_queue flush_waiters; 231 + struct vdo_wait_queue flush_waiters; 232 232 /* The generation after the most recent flush */ 233 233 u8 generation; 234 234 u8 oldest_generation;
+7 -7
drivers/md/dm-vdo/data-vio.c
··· 249 249 250 250 lock->lbn = lbn; 251 251 lock->locked = false; 252 - vdo_initialize_wait_queue(&lock->waiters); 252 + vdo_waitq_init(&lock->waiters); 253 253 zone_number = vdo_compute_logical_zone(data_vio); 254 254 lock->zone = &vdo->logical_zones->zones[zone_number]; 255 255 } ··· 466 466 } 467 467 468 468 data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK; 469 - vdo_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter); 469 + vdo_waitq_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter); 470 470 471 471 /* 472 472 * Prevent writes and read-modify-writes from blocking indefinitely on lock holders in the ··· 1191 1191 1192 1192 /* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */ 1193 1193 next_lock_holder = 1194 - waiter_as_data_vio(vdo_dequeue_next_waiter(&lock->waiters)); 1194 + vdo_waiter_as_data_vio(vdo_waitq_dequeue_next_waiter(&lock->waiters)); 1195 1195 1196 1196 /* Transfer the remaining lock waiters to the next lock holder. */ 1197 - vdo_transfer_all_waiters(&lock->waiters, 1198 - &next_lock_holder->logical.waiters); 1197 + vdo_waitq_transfer_all_waiters(&lock->waiters, 1198 + &next_lock_holder->logical.waiters); 1199 1199 1200 1200 result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn, 1201 1201 next_lock_holder, true, (void **) &lock_holder); ··· 1213 1213 * If there are still waiters, other data_vios must be trying to get the lock we just 1214 1214 * transferred. We must ensure that the new lock holder doesn't block in the packer. 
1215 1215 */ 1216 - if (vdo_has_waiters(&next_lock_holder->logical.waiters)) 1216 + if (vdo_waitq_has_waiters(&next_lock_holder->logical.waiters)) 1217 1217 cancel_data_vio_compression(next_lock_holder); 1218 1218 1219 1219 /* ··· 1235 1235 1236 1236 assert_data_vio_in_logical_zone(data_vio); 1237 1237 1238 - if (vdo_has_waiters(&lock->waiters)) 1238 + if (vdo_waitq_has_waiters(&lock->waiters)) 1239 1239 transfer_lock(data_vio, lock); 1240 1240 else 1241 1241 release_lock(data_vio, lock);
+6 -6
drivers/md/dm-vdo/data-vio.h
··· 54 54 struct lbn_lock { 55 55 logical_block_number_t lbn; 56 56 bool locked; 57 - struct wait_queue waiters; 57 + struct vdo_wait_queue waiters; 58 58 struct logical_zone *zone; 59 59 }; 60 60 ··· 75 75 /* The key for the lock map */ 76 76 u64 key; 77 77 /* The queue of waiters for the page this vio is allocating or loading */ 78 - struct wait_queue waiters; 78 + struct vdo_wait_queue waiters; 79 79 /* The block map tree slots for this LBN */ 80 80 struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1]; 81 81 }; ··· 168 168 bool increment; 169 169 struct zoned_pbn zpbn; 170 170 struct pbn_lock *lock; 171 - struct waiter waiter; 171 + struct vdo_waiter waiter; 172 172 }; 173 173 174 174 /* A vio for processing user data requests. */ 175 175 struct data_vio { 176 - /* The wait_queue entry structure */ 177 - struct waiter waiter; 176 + /* The vdo_wait_queue entry structure */ 177 + struct vdo_waiter waiter; 178 178 179 179 /* The logical block of this request */ 180 180 struct lbn_lock logical; ··· 288 288 return vio_as_data_vio(as_vio(completion)); 289 289 } 290 290 291 - static inline struct data_vio *waiter_as_data_vio(struct waiter *waiter) 291 + static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter) 292 292 { 293 293 if (waiter == NULL) 294 294 return NULL;
+24 -24
drivers/md/dm-vdo/dedupe.c
··· 270 270 * to get the information they all need to deduplicate--either against each other, or 271 271 * against an existing duplicate on disk. 272 272 */ 273 - struct wait_queue waiters; 273 + struct vdo_wait_queue waiters; 274 274 }; 275 275 276 276 enum { ··· 351 351 memset(lock, 0, sizeof(*lock)); 352 352 INIT_LIST_HEAD(&lock->pool_node); 353 353 INIT_LIST_HEAD(&lock->duplicate_ring); 354 - vdo_initialize_wait_queue(&lock->waiters); 354 + vdo_waitq_init(&lock->waiters); 355 355 list_add_tail(&lock->pool_node, &zone->lock_pool); 356 356 } 357 357 ··· 420 420 */ 421 421 static inline struct data_vio *dequeue_lock_waiter(struct hash_lock *lock) 422 422 { 423 - return waiter_as_data_vio(vdo_dequeue_next_waiter(&lock->waiters)); 423 + return vdo_waiter_as_data_vio(vdo_waitq_dequeue_next_waiter(&lock->waiters)); 424 424 } 425 425 426 426 /** ··· 536 536 */ 537 537 static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio) 538 538 { 539 - vdo_enqueue_waiter(&lock->waiters, &data_vio->waiter); 539 + vdo_waitq_enqueue_waiter(&lock->waiters, &data_vio->waiter); 540 540 541 541 /* 542 542 * Make sure the agent doesn't block indefinitely in the packer since it now has at least ··· 562 562 * @waiter: The data_vio's waiter link. 563 563 * @context: Not used. 564 564 */ 565 - static void abort_waiter(struct waiter *waiter, void *context __always_unused) 565 + static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused) 566 566 { 567 - write_data_vio(waiter_as_data_vio(waiter)); 567 + write_data_vio(vdo_waiter_as_data_vio(waiter)); 568 568 } 569 569 570 570 /** ··· 602 602 /* Ensure we don't attempt to update advice when cleaning up. 
*/ 603 603 lock->update_advice = false; 604 604 605 - vdo_notify_all_waiters(&lock->waiters, abort_waiter, NULL); 605 + vdo_waitq_notify_all_waiters(&lock->waiters, abort_waiter, NULL); 606 606 607 607 if (lock->duplicate_lock != NULL) { 608 608 /* The agent must reference the duplicate zone to launch it. */ ··· 650 650 */ 651 651 lock->verified = false; 652 652 653 - if (vdo_has_waiters(&lock->waiters)) { 653 + if (vdo_waitq_has_waiters(&lock->waiters)) { 654 654 /* 655 655 * UNLOCKING -> LOCKING transition: A new data_vio entered the hash lock while the 656 656 * agent was releasing the PBN lock. The current agent exits and the waiter has to ··· 750 750 */ 751 751 lock->update_advice = false; 752 752 753 - if (vdo_has_waiters(&lock->waiters)) { 753 + if (vdo_waitq_has_waiters(&lock->waiters)) { 754 754 /* 755 755 * UPDATING -> DEDUPING transition: A new data_vio arrived during the UDS update. 756 756 * Send it on the verified dedupe path. The agent is done with the lock, but the ··· 812 812 struct data_vio *agent = data_vio; 813 813 814 814 ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING"); 815 - ASSERT_LOG_ONLY(!vdo_has_waiters(&lock->waiters), 815 + ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), 816 816 "shouldn't have any lock waiters in DEDUPING"); 817 817 818 818 /* Just release the lock reference if other data_vios are still deduping. */ ··· 917 917 * Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits 918 918 * on that lock. 
919 919 */ 920 - static void enter_forked_lock(struct waiter *waiter, void *context) 920 + static void enter_forked_lock(struct vdo_waiter *waiter, void *context) 921 921 { 922 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 922 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 923 923 struct hash_lock *new_lock = context; 924 924 925 925 set_hash_lock(data_vio, new_lock); ··· 956 956 set_hash_lock(new_agent, new_lock); 957 957 new_lock->agent = new_agent; 958 958 959 - vdo_notify_all_waiters(&old_lock->waiters, enter_forked_lock, new_lock); 959 + vdo_waitq_notify_all_waiters(&old_lock->waiters, enter_forked_lock, new_lock); 960 960 961 961 new_agent->is_duplicate = false; 962 962 start_writing(new_lock, new_agent); ··· 1033 1033 launch_dedupe(lock, agent, true); 1034 1034 agent = NULL; 1035 1035 } 1036 - while (vdo_has_waiters(&lock->waiters)) 1036 + while (vdo_waitq_has_waiters(&lock->waiters)) 1037 1037 launch_dedupe(lock, dequeue_lock_waiter(lock), false); 1038 1038 1039 1039 if (agent_is_done) { ··· 1454 1454 lock->update_advice = true; 1455 1455 1456 1456 /* If there are any waiters, we need to start deduping them. */ 1457 - if (vdo_has_waiters(&lock->waiters)) { 1457 + if (vdo_waitq_has_waiters(&lock->waiters)) { 1458 1458 /* 1459 1459 * WRITING -> DEDUPING transition: an asynchronously-written block failed to 1460 1460 * compress, so the PBN lock on the written copy was already transferred. The agent ··· 1502 1502 */ 1503 1503 static struct data_vio *select_writing_agent(struct hash_lock *lock) 1504 1504 { 1505 - struct wait_queue temp_queue; 1505 + struct vdo_wait_queue temp_queue; 1506 1506 struct data_vio *data_vio; 1507 1507 1508 - vdo_initialize_wait_queue(&temp_queue); 1508 + vdo_waitq_init(&temp_queue); 1509 1509 1510 1510 /* 1511 1511 * Move waiters to the temp queue one-by-one until we find an allocation. 
Not ideal to ··· 1514 1514 while (((data_vio = dequeue_lock_waiter(lock)) != NULL) && 1515 1515 !data_vio_has_allocation(data_vio)) { 1516 1516 /* Use the lower-level enqueue since we're just moving waiters around. */ 1517 - vdo_enqueue_waiter(&temp_queue, &data_vio->waiter); 1517 + vdo_waitq_enqueue_waiter(&temp_queue, &data_vio->waiter); 1518 1518 } 1519 1519 1520 1520 if (data_vio != NULL) { ··· 1522 1522 * Move the rest of the waiters over to the temp queue, preserving the order they 1523 1523 * arrived at the lock. 1524 1524 */ 1525 - vdo_transfer_all_waiters(&lock->waiters, &temp_queue); 1525 + vdo_waitq_transfer_all_waiters(&lock->waiters, &temp_queue); 1526 1526 1527 1527 /* 1528 1528 * The current agent is being replaced and will have to wait to dedupe; make it the 1529 1529 * first waiter since it was the first to reach the lock. 1530 1530 */ 1531 - vdo_enqueue_waiter(&lock->waiters, &lock->agent->waiter); 1531 + vdo_waitq_enqueue_waiter(&lock->waiters, &lock->agent->waiter); 1532 1532 lock->agent = data_vio; 1533 1533 } else { 1534 1534 /* No one has an allocation, so keep the current agent. */ ··· 1536 1536 } 1537 1537 1538 1538 /* Swap all the waiters back onto the lock's queue. */ 1539 - vdo_transfer_all_waiters(&temp_queue, &lock->waiters); 1539 + vdo_waitq_transfer_all_waiters(&temp_queue, &lock->waiters); 1540 1540 return data_vio; 1541 1541 } 1542 1542 ··· 1577 1577 * If the agent compresses, it might wait indefinitely in the packer, which would be bad if 1578 1578 * there are any other data_vios waiting. 
1579 1579 */ 1580 - if (vdo_has_waiters(&lock->waiters)) 1580 + if (vdo_waitq_has_waiters(&lock->waiters)) 1581 1581 cancel_data_vio_compression(agent); 1582 1582 1583 1583 /* ··· 1928 1928 "unregistered hash lock must not be in the lock map"); 1929 1929 } 1930 1930 1931 - ASSERT_LOG_ONLY(!vdo_has_waiters(&lock->waiters), 1931 + ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), 1932 1932 "hash lock returned to zone must have no waiters"); 1933 1933 ASSERT_LOG_ONLY((lock->duplicate_lock == NULL), 1934 1934 "hash lock returned to zone must not reference a PBN lock"); ··· 2812 2812 lock, state, (lock->registered ? 'D' : 'U'), 2813 2813 (unsigned long long) lock->duplicate.pbn, 2814 2814 lock->duplicate.state, lock->reference_count, 2815 - vdo_count_waiters(&lock->waiters), lock->agent); 2815 + vdo_waitq_num_waiters(&lock->waiters), lock->agent); 2816 2816 } 2817 2817 2818 2818 static const char *index_state_to_string(struct hash_zones *zones,
+6 -6
drivers/md/dm-vdo/dump.c
··· 146 146 } 147 147 148 148 /* 149 - * Dump out the data_vio waiters on a wait queue. 149 + * Dump out the data_vio waiters on a waitq. 150 150 * wait_on should be the label to print for queue (e.g. logical or physical) 151 151 */ 152 - static void dump_vio_waiters(struct wait_queue *queue, char *wait_on) 152 + static void dump_vio_waiters(struct vdo_wait_queue *waitq, char *wait_on) 153 153 { 154 - struct waiter *waiter, *first = vdo_get_first_waiter(queue); 154 + struct vdo_waiter *waiter, *first = vdo_waitq_get_first_waiter(waitq); 155 155 struct data_vio *data_vio; 156 156 157 157 if (first == NULL) 158 158 return; 159 159 160 - data_vio = waiter_as_data_vio(first); 160 + data_vio = vdo_waiter_as_data_vio(first); 161 161 162 162 uds_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s", 163 163 wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn, 164 164 data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio)); 165 165 166 166 for (waiter = first->next_waiter; waiter != first; waiter = waiter->next_waiter) { 167 - data_vio = waiter_as_data_vio(waiter); 167 + data_vio = vdo_waiter_as_data_vio(waiter); 168 168 uds_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s", 169 169 data_vio, data_vio->allocation.pbn, data_vio->logical.lbn, 170 170 data_vio->duplicate.pbn, ··· 177 177 * logging brevity: 178 178 * 179 179 * R => vio completion result not VDO_SUCCESS 180 - * W => vio is on a wait queue 180 + * W => vio is on a waitq 181 181 * D => vio is a duplicate 182 182 * p => vio is a partial block operation 183 183 * z => vio is a zero block
+17 -15
drivers/md/dm-vdo/flush.c
··· 31 31 /** The first unacknowledged flush generation */ 32 32 sequence_number_t first_unacknowledged_generation; 33 33 /** The queue of flush requests waiting to notify other threads */ 34 - struct wait_queue notifiers; 34 + struct vdo_wait_queue notifiers; 35 35 /** The queue of flush requests waiting for VIOs to complete */ 36 - struct wait_queue pending_flushes; 36 + struct vdo_wait_queue pending_flushes; 37 37 /** The flush generation for which notifications are being sent */ 38 38 sequence_number_t notify_generation; 39 39 /** The logical zone to notify next */ ··· 93 93 * 94 94 * Return: The wait queue entry as a vdo_flush. 95 95 */ 96 - static struct vdo_flush *waiter_as_flush(struct waiter *waiter) 96 + static struct vdo_flush *vdo_waiter_as_flush(struct vdo_waiter *waiter) 97 97 { 98 98 return container_of(waiter, struct vdo_flush, waiter); 99 99 } ··· 195 195 196 196 assert_on_flusher_thread(flusher, __func__); 197 197 198 - vdo_enqueue_waiter(&flusher->pending_flushes, 199 - vdo_dequeue_next_waiter(&flusher->notifiers)); 198 + vdo_waitq_enqueue_waiter(&flusher->pending_flushes, 199 + vdo_waitq_dequeue_next_waiter(&flusher->notifiers)); 200 200 vdo_complete_flushes(flusher); 201 - if (vdo_has_waiters(&flusher->notifiers)) 201 + if (vdo_waitq_has_waiters(&flusher->notifiers)) 202 202 notify_flush(flusher); 203 203 } 204 204 ··· 248 248 */ 249 249 static void notify_flush(struct flusher *flusher) 250 250 { 251 - struct vdo_flush *flush = waiter_as_flush(vdo_get_first_waiter(&flusher->notifiers)); 251 + struct vdo_flush *flush = 252 + vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->notifiers)); 252 253 253 254 flusher->notify_generation = flush->flush_generation; 254 255 flusher->logical_zone_to_notify = &flusher->vdo->logical_zones->zones[0]; ··· 281 280 } 282 281 283 282 flush->flush_generation = flusher->flush_generation++; 284 - may_notify = !vdo_has_waiters(&flusher->notifiers); 285 - vdo_enqueue_waiter(&flusher->notifiers, &flush->waiter); 
283 + may_notify = !vdo_waitq_has_waiters(&flusher->notifiers); 284 + vdo_waitq_enqueue_waiter(&flusher->notifiers, &flush->waiter); 286 285 if (may_notify) 287 286 notify_flush(flusher); 288 287 } ··· 295 294 { 296 295 bool drained; 297 296 298 - if (!vdo_is_state_draining(&flusher->state) || vdo_has_waiters(&flusher->pending_flushes)) 297 + if (!vdo_is_state_draining(&flusher->state) || 298 + vdo_waitq_has_waiters(&flusher->pending_flushes)) 299 299 return; 300 300 301 301 spin_lock(&flusher->lock); ··· 323 321 min(oldest_active_generation, 324 322 READ_ONCE(zone->oldest_active_generation)); 325 323 326 - while (vdo_has_waiters(&flusher->pending_flushes)) { 324 + while (vdo_waitq_has_waiters(&flusher->pending_flushes)) { 327 325 struct vdo_flush *flush = 328 - waiter_as_flush(vdo_get_first_waiter(&flusher->pending_flushes)); 326 + vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->pending_flushes)); 329 327 330 328 if (flush->flush_generation >= oldest_active_generation) 331 329 return; ··· 335 333 "acknowledged next expected flush, %llu, was: %llu", 336 334 (unsigned long long) flusher->first_unacknowledged_generation, 337 335 (unsigned long long) flush->flush_generation); 338 - vdo_dequeue_next_waiter(&flusher->pending_flushes); 336 + vdo_waitq_dequeue_next_waiter(&flusher->pending_flushes); 339 337 vdo_complete_flush(flush); 340 338 flusher->first_unacknowledged_generation++; 341 339 } ··· 354 352 (unsigned long long) flusher->flush_generation, 355 353 (unsigned long long) flusher->first_unacknowledged_generation); 356 354 uds_log_info(" notifiers queue is %s; pending_flushes queue is %s", 357 - (vdo_has_waiters(&flusher->notifiers) ? "not empty" : "empty"), 358 - (vdo_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty")); 355 + (vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"), 356 + (vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty")); 359 357 } 360 358 361 359 /**
+1 -1
drivers/md/dm-vdo/flush.h
··· 18 18 /* The flush bios covered by this request */ 19 19 struct bio_list bios; 20 20 /* The wait queue entry for this flush */ 21 - struct waiter waiter; 21 + struct vdo_waiter waiter; 22 22 /* Which flush this struct represents */ 23 23 sequence_number_t flush_generation; 24 24 };
+2 -2
drivers/md/dm-vdo/physical-zone.c
··· 519 519 * @waiter: The allocating_vio that was waiting to allocate. 520 520 * @context: The context (unused). 521 521 */ 522 - static void retry_allocation(struct waiter *waiter, void *context __always_unused) 522 + static void retry_allocation(struct vdo_waiter *waiter, void *context __always_unused) 523 523 { 524 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 524 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 525 525 526 526 /* Now that some slab has scrubbed, restart the allocation process. */ 527 527 data_vio->allocation.wait_for_clean_slab = false;
+36 -33
drivers/md/dm-vdo/recovery-journal.c
··· 267 267 * Invoked whenever a data_vio is to be released from the journal, either because its entry was 268 268 * committed to disk, or because there was an error. Implements waiter_callback_fn. 269 269 */ 270 - static void continue_waiter(struct waiter *waiter, void *context) 270 + static void continue_waiter(struct vdo_waiter *waiter, void *context) 271 271 { 272 - continue_data_vio_with_error(waiter_as_data_vio(waiter), *((int *) context)); 272 + continue_data_vio_with_error(vdo_waiter_as_data_vio(waiter), *((int *) context)); 273 273 } 274 274 275 275 /** ··· 287 287 * has waiters. 288 288 */ 289 289 return ((block != NULL) && 290 - (vdo_has_waiters(&block->entry_waiters) || 291 - vdo_has_waiters(&block->commit_waiters))); 290 + (vdo_waitq_has_waiters(&block->entry_waiters) || 291 + vdo_waitq_has_waiters(&block->commit_waiters))); 292 292 } 293 293 294 294 static void recycle_journal_blocks(struct recovery_journal *journal); ··· 343 343 recycle_journal_blocks(journal); 344 344 345 345 /* Release any data_vios waiting to be assigned entries. 
*/ 346 - vdo_notify_all_waiters(&journal->entry_waiters, continue_waiter, 347 - &result); 346 + vdo_waitq_notify_all_waiters(&journal->entry_waiters, 347 + continue_waiter, &result); 348 348 } 349 349 350 350 if (!vdo_is_state_draining(&journal->state) || 351 351 journal->reaping || 352 352 has_block_waiters(journal) || 353 - vdo_has_waiters(&journal->entry_waiters) || 353 + vdo_waitq_has_waiters(&journal->entry_waiters) || 354 354 !suspend_lock_counter(&journal->lock_counter)) 355 355 return; 356 356 ··· 721 721 722 722 INIT_LIST_HEAD(&journal->free_tail_blocks); 723 723 INIT_LIST_HEAD(&journal->active_tail_blocks); 724 - vdo_initialize_wait_queue(&journal->pending_writes); 724 + vdo_waitq_init(&journal->pending_writes); 725 725 726 726 journal->thread_id = vdo->thread_config.journal_thread; 727 727 journal->origin = partition->offset; ··· 1047 1047 struct recovery_journal_block *block) 1048 1048 { 1049 1049 if (!block->committing) 1050 - vdo_enqueue_waiter(&journal->pending_writes, &block->write_waiter); 1050 + vdo_waitq_enqueue_waiter(&journal->pending_writes, &block->write_waiter); 1051 1051 /* 1052 1052 * At the end of adding entries, or discovering this partial block is now full and ready to 1053 1053 * rewrite, we will call write_blocks() and write a whole batch. ··· 1084 1084 * 1085 1085 * Implements waiter_callback_fn. 
1086 1086 */ 1087 - static void assign_entry(struct waiter *waiter, void *context) 1087 + static void assign_entry(struct vdo_waiter *waiter, void *context) 1088 1088 { 1089 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 1089 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 1090 1090 struct recovery_journal_block *block = context; 1091 1091 struct recovery_journal *journal = block->journal; 1092 1092 ··· 1099 1099 update_usages(journal, data_vio); 1100 1100 journal->available_space--; 1101 1101 1102 - if (!vdo_has_waiters(&block->entry_waiters)) 1102 + if (!vdo_waitq_has_waiters(&block->entry_waiters)) 1103 1103 journal->events.blocks.started++; 1104 1104 1105 - vdo_enqueue_waiter(&block->entry_waiters, &data_vio->waiter); 1105 + vdo_waitq_enqueue_waiter(&block->entry_waiters, &data_vio->waiter); 1106 1106 block->entry_count++; 1107 1107 block->uncommitted_entry_count++; 1108 1108 journal->events.entries.started++; ··· 1127 1127 } 1128 1128 1129 1129 journal->adding_entries = true; 1130 - while (vdo_has_waiters(&journal->entry_waiters) && prepare_to_assign_entry(journal)) { 1131 - vdo_notify_next_waiter(&journal->entry_waiters, assign_entry, 1132 - journal->active_block); 1130 + while (vdo_waitq_has_waiters(&journal->entry_waiters) && 1131 + prepare_to_assign_entry(journal)) { 1132 + vdo_waitq_notify_next_waiter(&journal->entry_waiters, 1133 + assign_entry, journal->active_block); 1133 1134 } 1134 1135 1135 1136 /* Now that we've finished with entries, see if we have a batch of blocks to write. */ ··· 1171 1170 * 1172 1171 * Implements waiter_callback_fn. 
1173 1172 */ 1174 - static void continue_committed_waiter(struct waiter *waiter, void *context) 1173 + static void continue_committed_waiter(struct vdo_waiter *waiter, void *context) 1175 1174 { 1176 - struct data_vio *data_vio = waiter_as_data_vio(waiter); 1175 + struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter); 1177 1176 struct recovery_journal *journal = context; 1178 1177 int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS); 1179 1178 bool has_decrement; ··· 1217 1216 if (block->committing) 1218 1217 return; 1219 1218 1220 - vdo_notify_all_waiters(&block->commit_waiters, continue_committed_waiter, 1221 - journal); 1219 + vdo_waitq_notify_all_waiters(&block->commit_waiters, 1220 + continue_committed_waiter, journal); 1222 1221 if (is_read_only(journal)) { 1223 - vdo_notify_all_waiters(&block->entry_waiters, 1224 - continue_committed_waiter, journal); 1222 + vdo_waitq_notify_all_waiters(&block->entry_waiters, 1223 + continue_committed_waiter, 1224 + journal); 1225 1225 } else if (is_block_dirty(block) || !is_block_full(block)) { 1226 1226 /* Stop at partially-committed or partially-filled blocks. */ 1227 1227 return; ··· 1330 1328 */ 1331 1329 static void add_queued_recovery_entries(struct recovery_journal_block *block) 1332 1330 { 1333 - while (vdo_has_waiters(&block->entry_waiters)) { 1331 + while (vdo_waitq_has_waiters(&block->entry_waiters)) { 1334 1332 struct data_vio *data_vio = 1335 - waiter_as_data_vio(vdo_dequeue_next_waiter(&block->entry_waiters)); 1333 + vdo_waiter_as_data_vio(vdo_waitq_dequeue_next_waiter(&block->entry_waiters)); 1336 1334 struct tree_lock *lock = &data_vio->tree_lock; 1337 1335 struct packed_recovery_journal_entry *packed_entry; 1338 1336 struct recovery_journal_entry new_entry; ··· 1359 1357 data_vio->recovery_sequence_number = block->sequence_number; 1360 1358 1361 1359 /* Enqueue the data_vio to wait for its entry to commit. 
*/ 1362 - vdo_enqueue_waiter(&block->commit_waiters, &data_vio->waiter); 1360 + vdo_waitq_enqueue_waiter(&block->commit_waiters, &data_vio->waiter); 1363 1361 } 1364 1362 } 1365 1363 ··· 1368 1366 * 1369 1367 * Implements waiter_callback_fn. 1370 1368 */ 1371 - static void write_block(struct waiter *waiter, void *context __always_unused) 1369 + static void write_block(struct vdo_waiter *waiter, void *context __always_unused) 1372 1370 { 1373 1371 struct recovery_journal_block *block = 1374 1372 container_of(waiter, struct recovery_journal_block, write_waiter); 1375 1373 struct recovery_journal *journal = block->journal; 1376 1374 struct packed_journal_header *header = get_block_header(block); 1377 1375 1378 - if (block->committing || !vdo_has_waiters(&block->entry_waiters) || is_read_only(journal)) 1376 + if (block->committing || !vdo_waitq_has_waiters(&block->entry_waiters) || 1377 + is_read_only(journal)) 1379 1378 return; 1380 1379 1381 - block->entries_in_commit = vdo_count_waiters(&block->entry_waiters); 1380 + block->entries_in_commit = vdo_waitq_num_waiters(&block->entry_waiters); 1382 1381 add_queued_recovery_entries(block); 1383 1382 1384 1383 journal->pending_write_count += 1; ··· 1422 1419 return; 1423 1420 1424 1421 /* Write all the full blocks. */ 1425 - vdo_notify_all_waiters(&journal->pending_writes, write_block, NULL); 1422 + vdo_waitq_notify_all_waiters(&journal->pending_writes, write_block, NULL); 1426 1423 1427 1424 /* 1428 1425 * Do we need to write the active block? 
Only if we have no outstanding writes, even after ··· 1462 1459 "journal lock not held for new entry"); 1463 1460 1464 1461 vdo_advance_journal_point(&journal->append_point, journal->entries_per_block); 1465 - vdo_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter); 1462 + vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter); 1466 1463 assign_entries(journal); 1467 1464 } 1468 1465 ··· 1724 1721 uds_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters", 1725 1722 (unsigned long long) block->sequence_number, block->entry_count, 1726 1723 (block->committing ? "committing" : "waiting"), 1727 - vdo_count_waiters(&block->entry_waiters), 1728 - vdo_count_waiters(&block->commit_waiters)); 1724 + vdo_waitq_num_waiters(&block->entry_waiters), 1725 + vdo_waitq_num_waiters(&block->commit_waiters)); 1729 1726 } 1730 1727 1731 1728 /** ··· 1748 1745 (unsigned long long) journal->slab_journal_reap_head, 1749 1746 (unsigned long long) stats.disk_full, 1750 1747 (unsigned long long) stats.slab_journal_commits_requested, 1751 - vdo_count_waiters(&journal->entry_waiters)); 1748 + vdo_waitq_num_waiters(&journal->entry_waiters)); 1752 1749 uds_log_info(" entries: started=%llu written=%llu committed=%llu", 1753 1750 (unsigned long long) stats.entries.started, 1754 1751 (unsigned long long) stats.entries.written,
+5 -5
drivers/md/dm-vdo/recovery-journal.h
··· 113 113 /* The doubly linked pointers for the free or active lists */ 114 114 struct list_head list_node; 115 115 /* The waiter for the pending full block list */ 116 - struct waiter write_waiter; 116 + struct vdo_waiter write_waiter; 117 117 /* The journal to which this block belongs */ 118 118 struct recovery_journal *journal; 119 119 /* A pointer to the current sector in the packed block buffer */ ··· 133 133 /* The number of new entries in the current commit */ 134 134 journal_entry_count_t entries_in_commit; 135 135 /* The queue of vios which will make entries for the next commit */ 136 - struct wait_queue entry_waiters; 136 + struct vdo_wait_queue entry_waiters; 137 137 /* The queue of vios waiting for the current commit */ 138 - struct wait_queue commit_waiters; 138 + struct vdo_wait_queue commit_waiters; 139 139 }; 140 140 141 141 struct recovery_journal { ··· 146 146 /* The block map which can hold locks on this journal */ 147 147 struct block_map *block_map; 148 148 /* The queue of vios waiting to make entries */ 149 - struct wait_queue entry_waiters; 149 + struct vdo_wait_queue entry_waiters; 150 150 /* The number of free entries in the journal */ 151 151 u64 available_space; 152 152 /* The number of decrement entries which need to be made */ ··· 184 184 /* A pointer to the active block (the one we are adding entries to now) */ 185 185 struct recovery_journal_block *active_block; 186 186 /* Journal blocks that need writing */ 187 - struct wait_queue pending_writes; 187 + struct vdo_wait_queue pending_writes; 188 188 /* The new block map reap head after reaping */ 189 189 sequence_number_t block_map_reap_head; 190 190 /* The head block number for the block map rebuild range */
+50 -49
drivers/md/dm-vdo/slab-depot.c
··· 65 65 static inline bool __must_check must_make_entries_to_flush(struct slab_journal *journal) 66 66 { 67 67 return ((journal->slab->status != VDO_SLAB_REBUILDING) && 68 - vdo_has_waiters(&journal->entry_waiters)); 68 + vdo_waitq_has_waiters(&journal->entry_waiters)); 69 69 } 70 70 71 71 /** ··· 122 122 123 123 static void add_entries(struct slab_journal *journal); 124 124 static void update_tail_block_location(struct slab_journal *journal); 125 - static void release_journal_locks(struct waiter *waiter, void *context); 125 + static void release_journal_locks(struct vdo_waiter *waiter, void *context); 126 126 127 127 /** 128 128 * is_slab_journal_blank() - Check whether a slab's journal is blank. ··· 184 184 code = vdo_get_admin_state_code(&slab->state); 185 185 read_only = vdo_is_read_only(slab->allocator->depot->vdo); 186 186 if (!read_only && 187 - vdo_has_waiters(&slab->dirty_blocks) && 187 + vdo_waitq_has_waiters(&slab->dirty_blocks) && 188 188 (code != VDO_ADMIN_STATE_SUSPENDING) && 189 189 (code != VDO_ADMIN_STATE_RECOVERING)) 190 190 return; ··· 229 229 */ 230 230 static void check_summary_drain_complete(struct block_allocator *allocator) 231 231 { 232 - struct vdo *vdo = allocator->depot->vdo; 233 - 234 232 if (!vdo_is_state_draining(&allocator->summary_state) || 235 233 (allocator->summary_write_count > 0)) 236 234 return; 237 235 238 236 vdo_finish_operation(&allocator->summary_state, 239 - (vdo_is_read_only(vdo) ? VDO_READ_ONLY : VDO_SUCCESS)); 237 + (vdo_is_read_only(allocator->depot->vdo) ? 238 + VDO_READ_ONLY : VDO_SUCCESS)); 240 239 } 241 240 242 241 /** ··· 244 245 * @queue: The queue to notify. 245 246 */ 246 247 static void notify_summary_waiters(struct block_allocator *allocator, 247 - struct wait_queue *queue) 248 + struct vdo_wait_queue *queue) 248 249 { 249 - int result = (vdo_is_read_only(allocator->depot->vdo) ? VDO_READ_ONLY : VDO_SUCCESS); 250 + int result = (vdo_is_read_only(allocator->depot->vdo) ? 
251 + VDO_READ_ONLY : VDO_SUCCESS); 250 252 251 - vdo_notify_all_waiters(queue, NULL, &result); 253 + vdo_waitq_notify_all_waiters(queue, NULL, &result); 252 254 } 253 255 254 256 static void launch_write(struct slab_summary_block *summary_block); ··· 264 264 notify_summary_waiters(block->allocator, &block->current_update_waiters); 265 265 block->writing = false; 266 266 block->allocator->summary_write_count--; 267 - if (vdo_has_waiters(&block->next_update_waiters)) 267 + if (vdo_waitq_has_waiters(&block->next_update_waiters)) 268 268 launch_write(block); 269 269 else 270 270 check_summary_drain_complete(block->allocator); ··· 320 320 return; 321 321 322 322 allocator->summary_write_count++; 323 - vdo_transfer_all_waiters(&block->next_update_waiters, 324 - &block->current_update_waiters); 323 + vdo_waitq_transfer_all_waiters(&block->next_update_waiters, 324 + &block->current_update_waiters); 325 325 block->writing = true; 326 326 327 327 if (vdo_is_read_only(depot->vdo)) { ··· 351 351 * @is_clean: Whether the slab is clean. 352 352 * @free_blocks: The number of free blocks. 353 353 */ 354 - static void update_slab_summary_entry(struct vdo_slab *slab, struct waiter *waiter, 354 + static void update_slab_summary_entry(struct vdo_slab *slab, struct vdo_waiter *waiter, 355 355 tail_block_offset_t tail_block_offset, 356 356 bool load_ref_counts, bool is_clean, 357 357 block_count_t free_blocks) ··· 382 382 .is_dirty = !is_clean, 383 383 .fullness_hint = compute_fullness_hint(allocator->depot, free_blocks), 384 384 }; 385 - vdo_enqueue_waiter(&block->next_update_waiters, waiter); 385 + vdo_waitq_enqueue_waiter(&block->next_update_waiters, waiter); 386 386 launch_write(block); 387 387 } 388 388 ··· 441 441 * @waiter: The journal as a flush waiter. 442 442 * @context: The newly acquired flush vio. 
443 443 */ 444 - static void flush_for_reaping(struct waiter *waiter, void *context) 444 + static void flush_for_reaping(struct vdo_waiter *waiter, void *context) 445 445 { 446 446 struct slab_journal *journal = 447 447 container_of(waiter, struct slab_journal, flush_waiter); ··· 550 550 * 551 551 * Implements waiter_callback_fn. 552 552 */ 553 - static void release_journal_locks(struct waiter *waiter, void *context) 553 + static void release_journal_locks(struct vdo_waiter *waiter, void *context) 554 554 { 555 555 sequence_number_t first, i; 556 556 struct slab_journal *journal = ··· 734 734 * 735 735 * Callback from acquire_vio_from_pool() registered in commit_tail(). 736 736 */ 737 - static void write_slab_journal_block(struct waiter *waiter, void *context) 737 + static void write_slab_journal_block(struct vdo_waiter *waiter, void *context) 738 738 { 739 739 struct pooled_vio *pooled = context; 740 740 struct vio *vio = &pooled->vio; ··· 1006 1006 } 1007 1007 1008 1008 /** finish_summary_update() - A waiter callback that resets the writing state of a slab. */ 1009 - static void finish_summary_update(struct waiter *waiter, void *context) 1009 + static void finish_summary_update(struct vdo_waiter *waiter, void *context) 1010 1010 { 1011 1011 struct vdo_slab *slab = container_of(waiter, struct vdo_slab, summary_waiter); 1012 1012 int result = *((int *) context); ··· 1021 1021 check_if_slab_drained(slab); 1022 1022 } 1023 1023 1024 - static void write_reference_block(struct waiter *waiter, void *context); 1024 + static void write_reference_block(struct vdo_waiter *waiter, void *context); 1025 1025 1026 1026 /** 1027 1027 * launch_reference_block_write() - Launch the write of a dirty reference block by first acquiring ··· 1032 1032 * This can be asynchronous since the writer will have to wait if all VIOs in the pool are 1033 1033 * currently in use. 
1034 1034 */ 1035 - static void launch_reference_block_write(struct waiter *waiter, void *context) 1035 + static void launch_reference_block_write(struct vdo_waiter *waiter, void *context) 1036 1036 { 1037 1037 struct vdo_slab *slab = context; 1038 1038 ··· 1047 1047 1048 1048 static void save_dirty_reference_blocks(struct vdo_slab *slab) 1049 1049 { 1050 - vdo_notify_all_waiters(&slab->dirty_blocks, launch_reference_block_write, slab); 1050 + vdo_waitq_notify_all_waiters(&slab->dirty_blocks, 1051 + launch_reference_block_write, slab); 1051 1052 check_if_slab_drained(slab); 1052 1053 } 1053 1054 ··· 1085 1084 1086 1085 /* Re-queue the block if it was re-dirtied while it was writing. */ 1087 1086 if (block->is_dirty) { 1088 - vdo_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); 1087 + vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); 1089 1088 if (vdo_is_state_draining(&slab->state)) { 1090 1089 /* We must be saving, and this block will otherwise not be relaunched. */ 1091 1090 save_dirty_reference_blocks(slab); ··· 1098 1097 * Mark the slab as clean in the slab summary if there are no dirty or writing blocks 1099 1098 * and no summary update in progress. 1100 1099 */ 1101 - if ((slab->active_count > 0) || vdo_has_waiters(&slab->dirty_blocks)) { 1100 + if ((slab->active_count > 0) || vdo_waitq_has_waiters(&slab->dirty_blocks)) { 1102 1101 check_if_slab_drained(slab); 1103 1102 return; 1104 1103 } ··· 1176 1175 * @waiter: The waiter of the dirty block. 1177 1176 * @context: The VIO returned by the pool. 
1178 1177 */ 1179 - static void write_reference_block(struct waiter *waiter, void *context) 1178 + static void write_reference_block(struct vdo_waiter *waiter, void *context) 1180 1179 { 1181 1180 size_t block_offset; 1182 1181 physical_block_number_t pbn; ··· 1214 1213 { 1215 1214 block_count_t length = journal_length(journal); 1216 1215 struct vdo_slab *slab = journal->slab; 1217 - block_count_t write_count = vdo_count_waiters(&slab->dirty_blocks); 1216 + block_count_t write_count = vdo_waitq_num_waiters(&slab->dirty_blocks); 1218 1217 block_count_t written; 1219 1218 1220 1219 if ((length < journal->flushing_threshold) || (write_count == 0)) ··· 1229 1228 } 1230 1229 1231 1230 for (written = 0; written < write_count; written++) { 1232 - vdo_notify_next_waiter(&slab->dirty_blocks, 1233 - launch_reference_block_write, slab); 1231 + vdo_waitq_notify_next_waiter(&slab->dirty_blocks, 1232 + launch_reference_block_write, slab); 1234 1233 } 1235 1234 } 1236 1235 ··· 1264 1263 1265 1264 block->is_dirty = true; 1266 1265 if (!block->is_writing) 1267 - vdo_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); 1266 + vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter); 1268 1267 } 1269 1268 1270 1269 /** ··· 1679 1678 * This callback is invoked by add_entries() once it has determined that we are ready to make 1680 1679 * another entry in the slab journal. Implements waiter_callback_fn. 
1681 1680 */ 1682 - static void add_entry_from_waiter(struct waiter *waiter, void *context) 1681 + static void add_entry_from_waiter(struct vdo_waiter *waiter, void *context) 1683 1682 { 1684 1683 int result; 1685 1684 struct reference_updater *updater = ··· 1745 1744 */ 1746 1745 static inline bool is_next_entry_a_block_map_increment(struct slab_journal *journal) 1747 1746 { 1748 - struct waiter *waiter = vdo_get_first_waiter(&journal->entry_waiters); 1747 + struct vdo_waiter *waiter = vdo_waitq_get_first_waiter(&journal->entry_waiters); 1749 1748 struct reference_updater *updater = container_of(waiter, 1750 1749 struct reference_updater, 1751 1750 waiter); ··· 1768 1767 } 1769 1768 1770 1769 journal->adding_entries = true; 1771 - while (vdo_has_waiters(&journal->entry_waiters)) { 1770 + while (vdo_waitq_has_waiters(&journal->entry_waiters)) { 1772 1771 struct slab_journal_block_header *header = &journal->tail_header; 1773 1772 1774 1773 if (journal->partial_write_in_progress || ··· 1865 1864 } 1866 1865 } 1867 1866 1868 - vdo_notify_next_waiter(&journal->entry_waiters, 1869 - add_entry_from_waiter, journal); 1867 + vdo_waitq_notify_next_waiter(&journal->entry_waiters, 1868 + add_entry_from_waiter, journal); 1870 1869 } 1871 1870 1872 1871 journal->adding_entries = false; ··· 1874 1873 /* If there are no waiters, and we are flushing or saving, commit the tail block. */ 1875 1874 if (vdo_is_state_draining(&journal->slab->state) && 1876 1875 !vdo_is_state_suspending(&journal->slab->state) && 1877 - !vdo_has_waiters(&journal->entry_waiters)) 1876 + !vdo_waitq_has_waiters(&journal->entry_waiters)) 1878 1877 commit_tail(journal); 1879 1878 } 1880 1879 ··· 2260 2259 * @waiter: The waiter of the block to load. 2261 2260 * @context: The VIO returned by the pool. 
2262 2261 */ 2263 - static void load_reference_block(struct waiter *waiter, void *context) 2262 + static void load_reference_block(struct vdo_waiter *waiter, void *context) 2264 2263 { 2265 2264 struct pooled_vio *pooled = context; 2266 2265 struct vio *vio = &pooled->vio; ··· 2285 2284 slab->free_blocks = slab->block_count; 2286 2285 slab->active_count = slab->reference_block_count; 2287 2286 for (i = 0; i < slab->reference_block_count; i++) { 2288 - struct waiter *waiter = &slab->reference_blocks[i].waiter; 2287 + struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter; 2289 2288 2290 2289 waiter->callback = load_reference_block; 2291 2290 acquire_vio_from_pool(slab->allocator->vio_pool, waiter); ··· 2456 2455 * 2457 2456 * This is the success callback from acquire_vio_from_pool() when loading a slab journal. 2458 2457 */ 2459 - static void read_slab_journal_tail(struct waiter *waiter, void *context) 2458 + static void read_slab_journal_tail(struct vdo_waiter *waiter, void *context) 2460 2459 { 2461 2460 struct slab_journal *journal = 2462 2461 container_of(waiter, struct slab_journal, resource_waiter); ··· 2663 2662 */ 2664 2663 static void finish_scrubbing(struct slab_scrubber *scrubber, int result) 2665 2664 { 2666 - bool notify = vdo_has_waiters(&scrubber->waiters); 2665 + bool notify = vdo_waitq_has_waiters(&scrubber->waiters); 2667 2666 bool done = !has_slabs_to_scrub(scrubber); 2668 2667 struct block_allocator *allocator = 2669 2668 container_of(scrubber, struct block_allocator, scrubber); ··· 2710 2709 * Fortunately if there were waiters, we can't have been freed yet. 
2711 2710 */ 2712 2711 if (notify) 2713 - vdo_notify_all_waiters(&scrubber->waiters, NULL, NULL); 2712 + vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL); 2714 2713 } 2715 2714 2716 2715 static void scrub_next_slab(struct slab_scrubber *scrubber); ··· 2934 2933 * Note: this notify call is always safe only because scrubbing can only be started when 2935 2934 * the VDO is quiescent. 2936 2935 */ 2937 - vdo_notify_all_waiters(&scrubber->waiters, NULL, NULL); 2936 + vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL); 2938 2937 2939 2938 if (vdo_is_read_only(completion->vdo)) { 2940 2939 finish_scrubbing(scrubber, VDO_READ_ONLY); ··· 3054 3053 * This callback is invoked on all vios waiting to make slab journal entries after the VDO has gone 3055 3054 * into read-only mode. Implements waiter_callback_fn. 3056 3055 */ 3057 - static void abort_waiter(struct waiter *waiter, void *context __always_unused) 3056 + static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused) 3058 3057 { 3059 3058 struct reference_updater *updater = 3060 3059 container_of(waiter, struct reference_updater, waiter); ··· 3080 3079 while (iterator.next != NULL) { 3081 3080 struct vdo_slab *slab = next_slab(&iterator); 3082 3081 3083 - vdo_notify_all_waiters(&slab->journal.entry_waiters, 3084 - abort_waiter, &slab->journal); 3082 + vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters, 3083 + abort_waiter, &slab->journal); 3085 3084 check_if_slab_drained(slab); 3086 3085 } 3087 3086 ··· 3211 3210 * some other error otherwise. 
3212 3211 */ 3213 3212 int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator, 3214 - struct waiter *waiter) 3213 + struct vdo_waiter *waiter) 3215 3214 { 3216 3215 if (vdo_is_read_only(allocator->depot->vdo)) 3217 3216 return VDO_READ_ONLY; ··· 3219 3218 if (vdo_is_state_quiescent(&allocator->scrubber.admin_state)) 3220 3219 return VDO_NO_SPACE; 3221 3220 3222 - vdo_enqueue_waiter(&allocator->scrubber.waiters, waiter); 3221 + vdo_waitq_enqueue_waiter(&allocator->scrubber.waiters, waiter); 3223 3222 return VDO_SUCCESS; 3224 3223 } 3225 3224 ··· 3245 3244 return; 3246 3245 } 3247 3246 3248 - vdo_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter); 3247 + vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter); 3249 3248 if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal)) 3250 3249 register_slab_for_scrubbing(slab, true); 3251 3250 ··· 3588 3587 } 3589 3588 3590 3589 uds_log_info(" slab journal: entry_waiters=%zu waiting_to_commit=%s updating_slab_summary=%s head=%llu unreapable=%llu tail=%llu next_commit=%llu summarized=%llu last_summarized=%llu recovery_lock=%llu dirty=%s", 3591 - vdo_count_waiters(&journal->entry_waiters), 3590 + vdo_waitq_num_waiters(&journal->entry_waiters), 3592 3591 uds_bool_to_string(journal->waiting_to_commit), 3593 3592 uds_bool_to_string(journal->updating_slab_summary), 3594 3593 (unsigned long long) journal->head, ··· 3609 3608 uds_log_info(" slab: free=%u/%u blocks=%u dirty=%zu active=%zu journal@(%llu,%u)", 3610 3609 slab->free_blocks, slab->block_count, 3611 3610 slab->reference_block_count, 3612 - vdo_count_waiters(&slab->dirty_blocks), 3611 + vdo_waitq_num_waiters(&slab->dirty_blocks), 3613 3612 slab->active_count, 3614 3613 (unsigned long long) slab->slab_journal_point.sequence_number, 3615 3614 slab->slab_journal_point.entry_count); ··· 3629 3628 3630 3629 uds_log_info("slab_scrubber slab_count %u waiters %zu %s%s", 3631 3630 READ_ONCE(scrubber->slab_count), 3632 - 
vdo_count_waiters(&scrubber->waiters), 3631 + vdo_waitq_num_waiters(&scrubber->waiters), 3633 3632 vdo_get_admin_state_code(&scrubber->admin_state)->name, 3634 3633 scrubber->high_priority_only ? ", high_priority_only " : ""); 3635 3634 }
+11 -11
drivers/md/dm-vdo/slab-depot.h
··· 60 60 61 61 struct slab_journal { 62 62 /* A waiter object for getting a VIO pool entry */ 63 - struct waiter resource_waiter; 63 + struct vdo_waiter resource_waiter; 64 64 /* A waiter object for updating the slab summary */ 65 - struct waiter slab_summary_waiter; 65 + struct vdo_waiter slab_summary_waiter; 66 66 /* A waiter object for getting a vio with which to flush */ 67 - struct waiter flush_waiter; 67 + struct vdo_waiter flush_waiter; 68 68 /* The queue of VIOs waiting to make an entry */ 69 - struct wait_queue entry_waiters; 69 + struct vdo_wait_queue entry_waiters; 70 70 /* The parent slab reference of this journal */ 71 71 struct vdo_slab *slab; 72 72 ··· 149 149 */ 150 150 struct reference_block { 151 151 /* This block waits on the ref_counts to tell it to write */ 152 - struct waiter waiter; 152 + struct vdo_waiter waiter; 153 153 /* The slab to which this reference_block belongs */ 154 154 struct vdo_slab *slab; 155 155 /* The number of references in this block that represent allocations */ ··· 241 241 struct search_cursor search_cursor; 242 242 243 243 /* A list of the dirty blocks waiting to be written out */ 244 - struct wait_queue dirty_blocks; 244 + struct vdo_wait_queue dirty_blocks; 245 245 /* The number of blocks which are currently writing */ 246 246 size_t active_count; 247 247 248 248 /* A waiter object for updating the slab summary */ 249 - struct waiter summary_waiter; 249 + struct vdo_waiter summary_waiter; 250 250 251 251 /* The latest slab journal for which there has been a reference count update */ 252 252 struct journal_point slab_journal_point; ··· 271 271 /* The queue of slabs to scrub once there are no high_priority_slabs */ 272 272 struct list_head slabs; 273 273 /* The queue of VIOs waiting for a slab to be scrubbed */ 274 - struct wait_queue waiters; 274 + struct vdo_wait_queue waiters; 275 275 276 276 /* 277 277 * The number of slabs that are unrecovered or being scrubbed. 
This field is modified by ··· 341 341 /* Whether this block has a write outstanding */ 342 342 bool writing; 343 343 /* Ring of updates waiting on the outstanding write */ 344 - struct wait_queue current_update_waiters; 344 + struct vdo_wait_queue current_update_waiters; 345 345 /* Ring of updates waiting on the next write */ 346 - struct wait_queue next_update_waiters; 346 + struct vdo_wait_queue next_update_waiters; 347 347 /* The active slab_summary_entry array for this block */ 348 348 struct slab_summary_entry *entries; 349 349 /* The vio used to write this block */ ··· 522 522 physical_block_number_t *block_number_ptr); 523 523 524 524 int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator, 525 - struct waiter *waiter); 525 + struct vdo_waiter *waiter); 526 526 527 527 void vdo_modify_reference_count(struct vdo_completion *completion, 528 528 struct reference_updater *updater);
+6 -6
drivers/md/dm-vdo/vio.c
··· 25 25 /** The list of objects which are available */ 26 26 struct list_head available; 27 27 /** The queue of requestors waiting for objects from the pool */ 28 - struct wait_queue waiting; 28 + struct vdo_wait_queue waiting; 29 29 /** The number of objects currently in use */ 30 30 size_t busy_count; 31 31 /** The list of objects which are in use */ ··· 364 364 return; 365 365 366 366 /* Remove all available vios from the object pool. */ 367 - ASSERT_LOG_ONLY(!vdo_has_waiters(&pool->waiting), 367 + ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting), 368 368 "VIO pool must not have any waiters when being freed"); 369 369 ASSERT_LOG_ONLY((pool->busy_count == 0), 370 370 "VIO pool must not have %zu busy entries when being freed", ··· 400 400 * @pool: The vio pool. 401 401 * @waiter: Object that is requesting a vio. 402 402 */ 403 - void acquire_vio_from_pool(struct vio_pool *pool, struct waiter *waiter) 403 + void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter) 404 404 { 405 405 struct pooled_vio *pooled; 406 406 ··· 408 408 "acquire from active vio_pool called from correct thread"); 409 409 410 410 if (list_empty(&pool->available)) { 411 - vdo_enqueue_waiter(&pool->waiting, waiter); 411 + vdo_waitq_enqueue_waiter(&pool->waiting, waiter); 412 412 return; 413 413 } 414 414 ··· 430 430 431 431 vio->vio.completion.error_handler = NULL; 432 432 vio->vio.completion.parent = NULL; 433 - if (vdo_has_waiters(&pool->waiting)) { 434 - vdo_notify_next_waiter(&pool->waiting, NULL, vio); 433 + if (vdo_waitq_has_waiters(&pool->waiting)) { 434 + vdo_waitq_notify_next_waiter(&pool->waiting, NULL, vio); 435 435 return; 436 436 } 437 437
+1 -1
drivers/md/dm-vdo/vio.h
··· 193 193 void *context, struct vio_pool **pool_ptr); 194 194 void free_vio_pool(struct vio_pool *pool); 195 195 bool __must_check is_vio_pool_busy(struct vio_pool *pool); 196 - void acquire_vio_from_pool(struct vio_pool *pool, struct waiter *waiter); 196 + void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter); 197 197 void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio); 198 198 199 199 #endif /* VIO_H */
+96 -94
drivers/md/dm-vdo/wait-queue.c
··· 12 12 #include "status-codes.h" 13 13 14 14 /** 15 - * vdo_enqueue_waiter() - Add a waiter to the tail end of a wait queue. 16 - * @queue: The queue to which to add the waiter. 17 - * @waiter: The waiter to add to the queue. 15 + * vdo_waitq_enqueue_waiter() - Add a waiter to the tail end of a waitq. 16 + * @waitq: The vdo_wait_queue to which to add the waiter. 17 + * @waiter: The waiter to add to the waitq. 18 18 * 19 - * The waiter must not already be waiting in a queue. 20 - * 21 - * Return: VDO_SUCCESS or an error code. 19 + * The waiter must not already be waiting in a waitq. 22 20 */ 23 - void vdo_enqueue_waiter(struct wait_queue *queue, struct waiter *waiter) 21 + void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *waiter) 24 22 { 25 23 BUG_ON(waiter->next_waiter != NULL); 26 24 27 - if (queue->last_waiter == NULL) { 25 + if (waitq->last_waiter == NULL) { 28 26 /* 29 - * The queue is empty, so form the initial circular list by self-linking the 27 + * The waitq is empty, so form the initial circular list by self-linking the 30 28 * initial waiter. 31 29 */ 32 30 waiter->next_waiter = waiter; 33 31 } else { 34 - /* Splice the new waiter in at the end of the queue. */ 35 - waiter->next_waiter = queue->last_waiter->next_waiter; 36 - queue->last_waiter->next_waiter = waiter; 32 + /* Splice the new waiter in at the end of the waitq. */ 33 + waiter->next_waiter = waitq->last_waiter->next_waiter; 34 + waitq->last_waiter->next_waiter = waiter; 37 35 } 38 36 39 37 /* In both cases, the waiter we added to the ring becomes the last waiter. */ 40 - queue->last_waiter = waiter; 41 - queue->queue_length += 1; 38 + waitq->last_waiter = waiter; 39 + waitq->length += 1; 42 40 } 43 41 44 42 /** 45 - * vdo_transfer_all_waiters() - Transfer all waiters from one wait queue to a second queue, 46 - * emptying the first queue. 47 - * @from_queue: The queue containing the waiters to move. 
48 - * @to_queue: The queue that will receive the waiters from the first queue. 43 + * vdo_waitq_transfer_all_waiters() - Transfer all waiters from one waitq to 44 + * a second waitq, emptying the first waitq. 45 + * @from_waitq: The waitq containing the waiters to move. 46 + * @to_waitq: The waitq that will receive the waiters from the first waitq. 49 47 */ 50 - void vdo_transfer_all_waiters(struct wait_queue *from_queue, struct wait_queue *to_queue) 48 + void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq, 49 + struct vdo_wait_queue *to_waitq) 51 50 { 52 - /* If the source queue is empty, there's nothing to do. */ 53 - if (!vdo_has_waiters(from_queue)) 51 + /* If the source waitq is empty, there's nothing to do. */ 52 + if (!vdo_waitq_has_waiters(from_waitq)) 54 53 return; 55 54 56 - if (vdo_has_waiters(to_queue)) { 55 + if (vdo_waitq_has_waiters(to_waitq)) { 57 56 /* 58 - * Both queues are non-empty. Splice the two circular lists together by swapping 59 - * the next (head) pointers in the list tails. 57 + * Both are non-empty. Splice the two circular lists together 58 + * by swapping the next (head) pointers in the list tails. 
60 59 */ 61 - struct waiter *from_head = from_queue->last_waiter->next_waiter; 62 - struct waiter *to_head = to_queue->last_waiter->next_waiter; 60 + struct vdo_waiter *from_head = from_waitq->last_waiter->next_waiter; 61 + struct vdo_waiter *to_head = to_waitq->last_waiter->next_waiter; 63 62 64 - to_queue->last_waiter->next_waiter = from_head; 65 - from_queue->last_waiter->next_waiter = to_head; 63 + to_waitq->last_waiter->next_waiter = from_head; 64 + from_waitq->last_waiter->next_waiter = to_head; 66 65 } 67 66 68 - to_queue->last_waiter = from_queue->last_waiter; 69 - to_queue->queue_length += from_queue->queue_length; 70 - vdo_initialize_wait_queue(from_queue); 67 + to_waitq->last_waiter = from_waitq->last_waiter; 68 + to_waitq->length += from_waitq->length; 69 + vdo_waitq_init(from_waitq); 71 70 } 72 71 73 72 /** 74 - * vdo_notify_all_waiters() - Notify all the entries waiting in a queue. 75 - * @queue: The wait queue containing the waiters to notify. 73 + * vdo_waitq_notify_all_waiters() - Notify all the entries waiting in a waitq. 74 + * @waitq: The vdo_wait_queue containing the waiters to notify. 76 75 * @callback: The function to call to notify each waiter, or NULL to invoke the callback field 77 76 * registered in each waiter. 78 77 * @context: The context to pass to the callback function. 79 78 * 80 - * Notifies all the entries waiting in a queue to continue execution by invoking a callback 81 - * function on each of them in turn. The queue is copied and emptied before invoking any callbacks, 82 - * and only the waiters that were in the queue at the start of the call will be notified. 79 + * Notifies all the entries waiting in a waitq to continue execution by invoking a callback 80 + * function on each of them in turn. The waitq is copied and emptied before invoking any callbacks, 81 + * and only the waiters that were in the waitq at the start of the call will be notified. 
83 82 */ 84 - void vdo_notify_all_waiters(struct wait_queue *queue, waiter_callback_fn callback, 85 - void *context) 83 + void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq, 84 + vdo_waiter_callback_fn callback, void *context) 86 85 { 87 86 /* 88 - * Copy and empty the queue first, avoiding the possibility of an infinite loop if entries 89 - * are returned to the queue by the callback function. 87 + * Copy and empty the waitq first, avoiding the possibility of an infinite 88 + * loop if entries are returned to the waitq by the callback function. 90 89 */ 91 - struct wait_queue waiters; 90 + struct vdo_wait_queue waiters; 92 91 93 - vdo_initialize_wait_queue(&waiters); 94 - vdo_transfer_all_waiters(queue, &waiters); 92 + vdo_waitq_init(&waiters); 93 + vdo_waitq_transfer_all_waiters(waitq, &waiters); 95 94 96 - /* Drain the copied queue, invoking the callback on every entry. */ 97 - while (vdo_has_waiters(&waiters)) 98 - vdo_notify_next_waiter(&waiters, callback, context); 95 + /* Drain the copied waitq, invoking the callback on every entry. */ 96 + while (vdo_waitq_has_waiters(&waiters)) 97 + vdo_waitq_notify_next_waiter(&waiters, callback, context); 99 98 } 100 99 101 100 /** 102 - * vdo_get_first_waiter() - Return the waiter that is at the head end of a wait queue. 103 - * @queue: The queue from which to get the first waiter. 101 + * vdo_waitq_get_first_waiter() - Return the waiter that is at the head end of a waitq. 102 + * @waitq: The vdo_wait_queue from which to get the first waiter. 104 103 * 105 - * Return: The first (oldest) waiter in the queue, or NULL if the queue is empty. 104 + * Return: The first (oldest) waiter in the waitq, or NULL if the waitq is empty. 
106 105 */ 107 - struct waiter *vdo_get_first_waiter(const struct wait_queue *queue) 106 + struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq) 108 107 { 109 - struct waiter *last_waiter = queue->last_waiter; 108 + struct vdo_waiter *last_waiter = waitq->last_waiter; 110 109 111 110 if (last_waiter == NULL) { 112 111 /* There are no waiters, so we're done. */ 113 112 return NULL; 114 113 } 115 114 116 - /* The queue is circular, so the last entry links to the head of the queue. */ 115 + /* The waitq is circular, so the last entry links to the head of the waitq. */ 117 116 return last_waiter->next_waiter; 118 117 } 119 118 120 119 /** 121 - * vdo_dequeue_matching_waiters() - Remove all waiters that match based on the specified matching 122 - * method and append them to a wait_queue. 123 - * @queue: The wait queue to process. 124 - * @match_method: The method to determine matching. 120 + * vdo_waitq_dequeue_matching_waiters() - Remove all waiters that match based on the specified 121 + * matching method and append them to a vdo_wait_queue. 122 + * @waitq: The vdo_wait_queue to process. 123 + * @waiter_match: The method to determine matching. 125 124 * @match_context: Contextual info for the match method. 126 - * @matched_queue: A wait_queue to store matches. 125 + * @matched_waitq: A vdo_wait_queue to store matches. 
127 126 */ 128 - void vdo_dequeue_matching_waiters(struct wait_queue *queue, waiter_match_fn match_method, 129 - void *match_context, struct wait_queue *matched_queue) 127 + void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq, 128 + vdo_waiter_match_fn waiter_match, 129 + void *match_context, 130 + struct vdo_wait_queue *matched_waitq) 130 131 { 131 - struct wait_queue matched_waiters, iteration_queue; 132 + // FIXME: copying a waitq just to iterate it, with matching, is unfortunate 133 + struct vdo_wait_queue matched_waiters, iteration_waitq; 132 134 133 - vdo_initialize_wait_queue(&matched_waiters); 135 + vdo_waitq_init(&matched_waiters); 136 + vdo_waitq_init(&iteration_waitq); 137 + vdo_waitq_transfer_all_waiters(waitq, &iteration_waitq); 134 138 135 - vdo_initialize_wait_queue(&iteration_queue); 136 - vdo_transfer_all_waiters(queue, &iteration_queue); 137 - while (vdo_has_waiters(&iteration_queue)) { 138 - struct waiter *waiter = vdo_dequeue_next_waiter(&iteration_queue); 139 + while (vdo_waitq_has_waiters(&iteration_waitq)) { 140 + struct vdo_waiter *waiter = vdo_waitq_dequeue_next_waiter(&iteration_waitq); 139 141 140 - vdo_enqueue_waiter((match_method(waiter, match_context) ? 141 - &matched_waiters : queue), waiter); 142 + vdo_waitq_enqueue_waiter((waiter_match(waiter, match_context) ? 143 + &matched_waiters : waitq), waiter); 142 144 } 143 145 144 - vdo_transfer_all_waiters(&matched_waiters, matched_queue); 146 + vdo_waitq_transfer_all_waiters(&matched_waiters, matched_waitq); 145 147 } 146 148 147 149 /** 148 - * vdo_dequeue_next_waiter() - Remove the first waiter from the head end of a wait queue. 149 - * @queue: The wait queue from which to remove the first entry. 150 + * vdo_waitq_dequeue_next_waiter() - Remove the first waiter from the head end of a waitq. 151 + * @waitq: The vdo_wait_queue from which to remove the first entry. 
150 152 * 151 153 * The caller will be responsible for waking the waiter by invoking the correct callback function 152 154 * to resume its execution. 153 155 * 154 - * Return: The first (oldest) waiter in the queue, or NULL if the queue is empty. 156 + * Return: The first (oldest) waiter in the waitq, or NULL if the waitq is empty. 155 157 */ 156 - struct waiter *vdo_dequeue_next_waiter(struct wait_queue *queue) 158 + struct vdo_waiter *vdo_waitq_dequeue_next_waiter(struct vdo_wait_queue *waitq) 157 159 { 158 - struct waiter *first_waiter = vdo_get_first_waiter(queue); 159 - struct waiter *last_waiter = queue->last_waiter; 160 + struct vdo_waiter *first_waiter = vdo_waitq_get_first_waiter(waitq); 161 + struct vdo_waiter *last_waiter = waitq->last_waiter; 160 162 161 163 if (first_waiter == NULL) 162 164 return NULL; 163 165 164 166 if (first_waiter == last_waiter) { 165 - /* The queue has a single entry, so just empty it out by nulling the tail. */ 166 - queue->last_waiter = NULL; 167 + /* The waitq has a single entry, so just empty it out by nulling the tail. */ 168 + waitq->last_waiter = NULL; 167 169 } else { 168 170 /* 169 - * The queue has more than one entry, so splice the first waiter out of the 170 - * circular queue. 171 + * The waitq has more than one entry, so splice the first waiter out of the 172 + * circular waitq. 171 173 */ 172 174 last_waiter->next_waiter = first_waiter->next_waiter; 173 175 } 174 176 175 - /* The waiter is no longer in a wait queue. */ 177 + /* The waiter is no longer in a waitq. */ 176 178 first_waiter->next_waiter = NULL; 177 - queue->queue_length -= 1; 179 + waitq->length -= 1; 178 180 179 181 return first_waiter; 180 182 } 181 183 182 184 /** 183 - * vdo_notify_next_waiter() - Notify the next entry waiting in a queue. 184 - * @queue: The wait queue containing the waiter to notify. 185 + * vdo_waitq_notify_next_waiter() - Notify the next entry waiting in a waitq. 
186 + * @waitq: The vdo_wait_queue containing the waiter to notify. 185 187 * @callback: The function to call to notify the waiter, or NULL to invoke the callback field 186 188 * registered in the waiter. 187 189 * @context: The context to pass to the callback function. 188 190 * 189 - * Notifies the next entry waiting in a queue to continue execution by invoking a callback function 190 - * on it after removing it from the queue. 191 + * Notifies the next entry waiting in a waitq to continue execution by invoking a callback function 192 + * on it after removing it from the waitq. 191 193 * 192 - * Return: true if there was a waiter in the queue. 194 + * Return: true if there was a waiter in the waitq. 193 195 */ 194 - bool vdo_notify_next_waiter(struct wait_queue *queue, waiter_callback_fn callback, 195 - void *context) 196 + bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq, 197 + vdo_waiter_callback_fn callback, void *context) 196 198 { 197 - struct waiter *waiter = vdo_dequeue_next_waiter(queue); 199 + struct vdo_waiter *waiter = vdo_waitq_dequeue_next_waiter(waitq); 198 200 199 201 if (waiter == NULL) 200 202 return false; 201 203 202 204 if (callback == NULL) 203 205 callback = waiter->callback; 204 - (*callback)(waiter, context); 206 + callback(waiter, context); 205 207 206 208 return true; 207 209 } 208 210 209 211 /** 210 - * vdo_get_next_waiter() - Get the waiter after this one, for debug iteration. 211 - * @queue: The wait queue. 212 + * vdo_waitq_get_next_waiter() - Get the waiter after this one, for debug iteration. 213 + * @waitq: The vdo_wait_queue. 212 214 * @waiter: A waiter. 213 215 * 214 216 * Return: The next waiter, or NULL. 
215 217 */ 216 - const struct waiter *vdo_get_next_waiter(const struct wait_queue *queue, 217 - const struct waiter *waiter) 218 + const struct vdo_waiter *vdo_waitq_get_next_waiter(const struct vdo_wait_queue *waitq, 219 + const struct vdo_waiter *waiter) 218 220 { 219 - struct waiter *first_waiter = vdo_get_first_waiter(queue); 221 + struct vdo_waiter *first_waiter = vdo_waitq_get_first_waiter(waitq); 220 222 221 223 if (waiter == NULL) 222 224 return first_waiter;
+70 -60
drivers/md/dm-vdo/wait-queue.h
··· 10 10 #include <linux/types.h> 11 11 12 12 /** 13 - * DOC: Wait queues. 13 + * A vdo_wait_queue is a circular singly linked list of entries waiting to be notified 14 + * of a change in a condition. Keeping a circular list allows the vdo_wait_queue 15 + * structure to simply be a pointer to the tail (newest) entry, supporting 16 + * constant-time enqueue and dequeue operations. A null pointer is an empty waitq. 14 17 * 15 - * A wait queue is a circular list of entries waiting to be notified of a change in a condition. 16 - * Keeping a circular list allows the queue structure to simply be a pointer to the tail (newest) 17 - * entry in the queue, supporting constant-time enqueue and dequeue operations. A null pointer is 18 - * an empty queue. 18 + * An empty waitq: 19 + * waitq0.last_waiter -> NULL 19 20 * 20 - * An empty queue: 21 - * queue0.last_waiter -> NULL 21 + * A singleton waitq: 22 + * waitq1.last_waiter -> entry1 -> entry1 -> [...] 22 23 * 23 - * A singleton queue: 24 - * queue1.last_waiter -> entry1 -> entry1 -> [...] 24 + * A three-element waitq: 25 + * waitq2.last_waiter -> entry3 -> entry1 -> entry2 -> entry3 -> [...] 25 26 * 26 - * A three-element queue: 27 - * queue2.last_waiter -> entry3 -> entry1 -> entry2 -> entry3 -> [...] 27 + * linux/wait.h's wait_queue_head is _not_ used because vdo_wait_queue's 28 + * interface is much less complex (doesn't need locking, priorities or timers). 29 + * Made possible by vdo's thread-based resource allocation and locking; and 30 + * the polling nature of vdo_wait_queue consumers. 31 + * 32 + * FIXME: could be made to use a linux/list.h's list_head but its extra barriers 33 + * really aren't needed. Nor is a doubly linked list, but vdo_wait_queue could 34 + * make use of __list_del_clearprev() -- but that would compromise the ability 35 + * to make full use of linux's list interface. 
28 36 */ 29 37 30 - struct waiter; 38 + struct vdo_waiter; 31 39 32 - struct wait_queue { 40 + struct vdo_wait_queue { 33 41 /* The tail of the queue, the last (most recently added) entry */ 34 - struct waiter *last_waiter; 42 + struct vdo_waiter *last_waiter; 35 43 /* The number of waiters currently in the queue */ 36 - size_t queue_length; 44 + size_t length; 37 45 }; 38 46 39 47 /** 40 - * typedef waiter_callback_fn - Callback type for functions which will be called to resume 41 - * processing of a waiter after it has been removed from its wait 42 - * queue. 48 + * vdo_waiter_callback_fn - Callback type that will be called to resume processing 49 + * of a waiter after it has been removed from its wait queue. 43 50 */ 44 - typedef void (*waiter_callback_fn)(struct waiter *waiter, void *context); 51 + typedef void (*vdo_waiter_callback_fn)(struct vdo_waiter *waiter, void *context); 45 52 46 53 /** 47 - * typedef waiter_match_fn - Method type for waiter matching methods. 54 + * vdo_waiter_match_fn - Method type for waiter matching methods. 48 55 * 49 - * A waiter_match_fn method returns false if the waiter does not match. 56 + * Returns false if the waiter does not match. 50 57 */ 51 - typedef bool (*waiter_match_fn)(struct waiter *waiter, void *context); 58 + typedef bool (*vdo_waiter_match_fn)(struct vdo_waiter *waiter, void *context); 52 59 53 - /* The queue entry structure for entries in a wait_queue. */ 54 - struct waiter { 60 + /* The structure for entries in a vdo_wait_queue. */ 61 + struct vdo_waiter { 55 62 /* 56 - * The next waiter in the queue. If this entry is the last waiter, then this is actually a 57 - * pointer back to the head of the queue. 63 + * The next waiter in the waitq. If this entry is the last waiter, then this 64 + * is actually a pointer back to the head of the waitq. 58 65 */ 59 - struct waiter *next_waiter; 66 + struct vdo_waiter *next_waiter; 60 67 61 - /* Optional waiter-specific callback to invoke when waking this waiter. 
*/ 62 - waiter_callback_fn callback; 68 + /* Optional waiter-specific callback to invoke when dequeuing this waiter. */ 69 + vdo_waiter_callback_fn callback; 63 70 }; 64 71 65 72 /** 66 - * is_waiting() - Check whether a waiter is waiting. 73 + * vdo_waiter_is_waiting() - Check whether a waiter is waiting. 67 74 * @waiter: The waiter to check. 68 75 * 69 - * Return: true if the waiter is on some wait_queue. 76 + * Return: true if the waiter is on some vdo_wait_queue. 70 77 */ 71 - static inline bool vdo_is_waiting(struct waiter *waiter) 78 + static inline bool vdo_waiter_is_waiting(struct vdo_waiter *waiter) 72 79 { 73 80 return (waiter->next_waiter != NULL); 74 81 } 75 82 76 83 /** 77 - * initialize_wait_queue() - Initialize a wait queue. 78 - * @queue: The queue to initialize. 84 + * vdo_waitq_init() - Initialize a vdo_wait_queue. 85 + * @waitq: The vdo_wait_queue to initialize. 79 86 */ 80 - static inline void vdo_initialize_wait_queue(struct wait_queue *queue) 87 + static inline void vdo_waitq_init(struct vdo_wait_queue *waitq) 81 88 { 82 - *queue = (struct wait_queue) { 89 + *waitq = (struct vdo_wait_queue) { 83 90 .last_waiter = NULL, 84 - .queue_length = 0, 91 + .length = 0, 85 92 }; 86 93 } 87 94 88 95 /** 89 - * has_waiters() - Check whether a wait queue has any entries waiting in it. 90 - * @queue: The queue to query. 96 + * vdo_waitq_has_waiters() - Check whether a vdo_wait_queue has any entries waiting. 97 + * @waitq: The vdo_wait_queue to query. 91 98 * 92 - * Return: true if there are any waiters in the queue. 99 + * Return: true if there are any waiters in the waitq. 
93 100 */ 94 - static inline bool __must_check vdo_has_waiters(const struct wait_queue *queue) 101 + static inline bool __must_check vdo_waitq_has_waiters(const struct vdo_wait_queue *waitq) 95 102 { 96 - return (queue->last_waiter != NULL); 103 + return (waitq->last_waiter != NULL); 97 104 } 98 105 99 - void vdo_enqueue_waiter(struct wait_queue *queue, struct waiter *waiter); 106 + void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, 107 + struct vdo_waiter *waiter); 100 108 101 - void vdo_notify_all_waiters(struct wait_queue *queue, waiter_callback_fn callback, 102 - void *context); 109 + void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq, 110 + vdo_waiter_callback_fn callback, void *context); 103 111 104 - bool vdo_notify_next_waiter(struct wait_queue *queue, waiter_callback_fn callback, 105 - void *context); 112 + bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq, 113 + vdo_waiter_callback_fn callback, void *context); 106 114 107 - void vdo_transfer_all_waiters(struct wait_queue *from_queue, 108 - struct wait_queue *to_queue); 115 + void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq, 116 + struct vdo_wait_queue *to_waitq); 109 117 110 - struct waiter *vdo_get_first_waiter(const struct wait_queue *queue); 118 + struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq); 111 119 112 - void vdo_dequeue_matching_waiters(struct wait_queue *queue, waiter_match_fn match_method, 113 - void *match_context, struct wait_queue *matched_queue); 120 + void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq, 121 + vdo_waiter_match_fn waiter_match, 122 + void *match_context, 123 + struct vdo_wait_queue *matched_waitq); 114 124 115 - struct waiter *vdo_dequeue_next_waiter(struct wait_queue *queue); 125 + struct vdo_waiter *vdo_waitq_dequeue_next_waiter(struct vdo_wait_queue *waitq); 116 126 117 127 /** 118 - * count_waiters() - Count the number of waiters in a wait queue. 
119 - * @queue: The wait queue to query. 128 + * vdo_waitq_num_waiters() - Return the number of waiters in a vdo_wait_queue. 129 + * @waitq: The vdo_wait_queue to query. 120 130 * 121 - * Return: The number of waiters in the queue. 131 + * Return: The number of waiters in the waitq. 122 132 */ 123 - static inline size_t __must_check vdo_count_waiters(const struct wait_queue *queue) 133 + static inline size_t __must_check vdo_waitq_num_waiters(const struct vdo_wait_queue *waitq) 124 134 { 125 - return queue->queue_length; 135 + return waitq->length; 126 136 } 127 137 128 - const struct waiter * __must_check vdo_get_next_waiter(const struct wait_queue *queue, 129 - const struct waiter *waiter); 138 + const struct vdo_waiter * __must_check 139 + vdo_waitq_get_next_waiter(const struct vdo_wait_queue *waitq, const struct vdo_waiter *waiter); 130 140 131 141 #endif /* VDO_WAIT_QUEUE_H */