// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "physical-zone.h"

#include <linux/list.h>

#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"

#include "block-map.h"
#include "completion.h"
#include "constants.h"
#include "data-vio.h"
#include "dedupe.h"
#include "encodings.h"
#include "flush.h"
#include "int-map.h"
#include "slab-depot.h"
#include "status-codes.h"
#include "vdo.h"

/* Each user data_vio needs a PBN read lock and write lock. */
#define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS)

struct pbn_lock_implementation {
        enum pbn_lock_type type;
        const char *name;
        const char *release_reason;
};

/* This array must have an entry for every pbn_lock_type value. */
static const struct pbn_lock_implementation LOCK_IMPLEMENTATIONS[] = {
        [VIO_READ_LOCK] = {
                .type = VIO_READ_LOCK,
                .name = "read",
                .release_reason = "candidate duplicate",
        },
        [VIO_WRITE_LOCK] = {
                .type = VIO_WRITE_LOCK,
                .name = "write",
                .release_reason = "newly allocated",
        },
        [VIO_BLOCK_MAP_WRITE_LOCK] = {
                .type = VIO_BLOCK_MAP_WRITE_LOCK,
                .name = "block map write",
                .release_reason = "block map write",
        },
};

static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type)
{
        return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
}

/**
 * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
 * @lock: The lock to check.
 *
 * Return: True if the lock is a read lock.
 */
bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
{
        return has_lock_type(lock, VIO_READ_LOCK);
}

static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type)
{
        lock->implementation = &LOCK_IMPLEMENTATIONS[type];
}

/**
 * vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock.
 * @lock: The PBN write lock to downgrade.
 * @compressed_write: True if the written block was a compressed block.
 *
 * The lock is converted in place; the holder retains its hold, and the increment limit is reset
 * according to whether the write was compressed.
 */
void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
{
        VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
                            "PBN lock must not already have been downgraded");
        VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
                            "must not downgrade block map write locks");
        VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
                            "PBN write lock should have one holder but has %u",
                            lock->holder_count);
        /*
         * data_vio write locks are downgraded in place--the writer retains the hold on the lock.
         * If this was a compressed write, the holder has not yet journaled its own inc ref;
         * otherwise, it has.
         */
        lock->increment_limit =
                (compressed_write ? MAXIMUM_REFERENCE_COUNT : MAXIMUM_REFERENCE_COUNT - 1);
        set_pbn_lock_type(lock, VIO_READ_LOCK);
}

/**
 * vdo_claim_pbn_lock_increment() - Try to claim one of the available reference count increments on
 *                                  a read lock.
 * @lock: The PBN read lock from which to claim an increment.
 *
 * Claims may be attempted from any thread. A claim is only valid until the PBN lock is released.
 *
 * Return: true if the claim succeeded, guaranteeing one increment can be made without overflowing
 *         the PBN's reference count.
 */
bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock)
{
        /*
         * Claim the next free reference atomically since hash locks from multiple hash zone
         * threads might be concurrently deduplicating against a single PBN lock on a compressed
         * block. As long as hitting the increment limit will lead to the PBN lock being released
         * in a sane time-frame, we won't overflow a 32-bit claim counter, allowing a simple add
         * instead of a compare-and-swap.
         */
        u32 claim_number = (u32) atomic_add_return(1, &lock->increments_claimed);

        return (claim_number <= lock->increment_limit);
}

/**
 * vdo_assign_pbn_lock_provisional_reference() - Inform a PBN lock that it is responsible for a
 *                                               provisional reference.
 * @lock: The PBN lock.
 */
void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
{
        VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
                            "lock does not have a provisional reference");
        lock->has_provisional_reference = true;
}

/**
 * vdo_unassign_pbn_lock_provisional_reference() - Inform a PBN lock that it is no longer
 *                                                 responsible for a provisional reference.
 * @lock: The PBN lock.
 */
void vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock)
{
        lock->has_provisional_reference = false;
}

/**
 * release_pbn_lock_provisional_reference() - If the lock is responsible for a provisional
 *                                            reference, release that reference.
 * @lock: The lock.
 * @locked_pbn: The PBN covered by the lock.
 * @allocator: The block allocator from which to release the reference.
 *
 * This method is called when the lock is released.
 */
static void release_pbn_lock_provisional_reference(struct pbn_lock *lock,
                                                   physical_block_number_t locked_pbn,
                                                   struct block_allocator *allocator)
{
        int result;

        if (!vdo_pbn_lock_has_provisional_reference(lock))
                return;

        result = vdo_release_block_reference(allocator, locked_pbn);
        if (result != VDO_SUCCESS) {
                vdo_log_error_strerror(result,
                                       "Failed to release reference to %s physical block %llu",
                                       lock->implementation->release_reason,
                                       (unsigned long long) locked_pbn);
        }

        vdo_unassign_pbn_lock_provisional_reference(lock);
}

/**
 * union idle_pbn_lock - PBN lock list entries.
 *
 * Unused (idle) PBN locks are kept in a list. Just like in a malloc implementation, the lock
 * structure is unused memory, so we can save a bit of space (and not pollute the lock structure
 * proper) by using a union to overlay the lock structure with the free list.
 */
typedef union {
        /** @entry: Only used while locks are in the pool. */
        struct list_head entry;
        /** @lock: Only used while locks are not in the pool. */
        struct pbn_lock lock;
} idle_pbn_lock;

/**
 * struct pbn_lock_pool - list of PBN locks.
 *
 * The lock pool is little more than the memory allocated for the locks.
 */
struct pbn_lock_pool {
        /** @capacity: The number of locks allocated for the pool. */
        size_t capacity;
        /** @borrowed: The number of locks currently borrowed from the pool. */
        size_t borrowed;
        /** @idle_list: A list containing all idle PBN lock instances. */
        struct list_head idle_list;
        /** @locks: The memory for all the locks allocated by this pool. */
        idle_pbn_lock locks[] __counted_by(capacity);
};

/**
 * return_pbn_lock_to_pool() - Return a pbn lock to its pool.
 * @pool: The pool from which the lock was borrowed.
 * @lock: The last reference to the lock being returned.
 *
 * It must be the last live reference, as if the memory were being freed (the lock memory will be
 * re-initialized or zeroed).
 */
static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock *lock)
{
        idle_pbn_lock *idle;

        /* A bit expensive, but will promptly catch some use-after-free errors. */
        memset(lock, 0, sizeof(*lock));

        idle = container_of(lock, idle_pbn_lock, lock);
        INIT_LIST_HEAD(&idle->entry);
        list_add_tail(&idle->entry, &pool->idle_list);

        VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
        pool->borrowed -= 1;
}

/**
 * make_pbn_lock_pool() - Create a new PBN lock pool and all the lock instances it can loan out.
 * @capacity: The number of PBN locks to allocate for the pool.
 * @pool_ptr: A pointer to receive the new pool.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int make_pbn_lock_pool(size_t capacity, struct pbn_lock_pool **pool_ptr)
{
        size_t i;
        struct pbn_lock_pool *pool;
        int result;

        result = vdo_allocate_extended(struct pbn_lock_pool, capacity, idle_pbn_lock,
                                       __func__, &pool);
        if (result != VDO_SUCCESS)
                return result;

        pool->capacity = capacity;
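        /*
         * Start with every lock counted as borrowed; each return_pbn_lock_to_pool() call below
         * places a lock on the idle list and decrements this count back toward zero.
         */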
        pool->borrowed = capacity;
        INIT_LIST_HEAD(&pool->idle_list);

        for (i = 0; i < capacity; i++)
                return_pbn_lock_to_pool(pool, &pool->locks[i].lock);

        *pool_ptr = pool;
        return VDO_SUCCESS;
}

/**
 * free_pbn_lock_pool() - Free a PBN lock pool.
 * @pool: The lock pool to free.
 *
 * This also frees all the PBN locks it allocated, so the caller must ensure that all locks have
 * been returned to the pool.
 */
static void free_pbn_lock_pool(struct pbn_lock_pool *pool)
{
        if (pool == NULL)
                return;

        VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
                            "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
                            pool->borrowed);
        vdo_free(pool);
}

/**
 * borrow_pbn_lock_from_pool() - Borrow a PBN lock from the pool and initialize it with the
 *                               provided type.
 * @pool: The pool from which to borrow.
 * @type: The type with which to initialize the lock.
 * @lock_ptr: A pointer to receive the borrowed lock.
 *
 * Pools do not grow on demand or allocate memory, so this will fail if the pool is empty. Borrowed
 * locks are still associated with this pool and must be returned to only this pool.
 *
 * Return: VDO_SUCCESS, or VDO_LOCK_ERROR if the pool is empty.
 */
static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
                                                  enum pbn_lock_type type,
                                                  struct pbn_lock **lock_ptr)
{
        int result;
        struct list_head *idle_entry;
        idle_pbn_lock *idle;

        if (pool->borrowed >= pool->capacity)
                return vdo_log_error_strerror(VDO_LOCK_ERROR,
                                              "no free PBN locks left to borrow");
        pool->borrowed += 1;

        result = VDO_ASSERT(!list_empty(&pool->idle_list),
                            "idle list should not be empty if pool not at capacity");
        if (result != VDO_SUCCESS)
                return result;

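        /*
         * Reuse the most recently returned lock: returns go to the tail of the idle list and
         * borrows also take from the tail, so the pool behaves as a LIFO stack.
         */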
        idle_entry = pool->idle_list.prev;
        list_del(idle_entry);
        memset(idle_entry, 0, sizeof(*idle_entry));

        idle = list_entry(idle_entry, idle_pbn_lock, entry);
        idle->lock.holder_count = 0;
        set_pbn_lock_type(&idle->lock, type);

        *lock_ptr = &idle->lock;
        return VDO_SUCCESS;
}

/**
 * initialize_zone() - Initialize a physical zone.
 * @vdo: The vdo to which the zone will belong.
 * @zones: The physical_zones to which the zone being initialized belongs.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
{
        int result;
        zone_count_t zone_number = zones->zone_count;
        struct physical_zone *zone = &zones->zones[zone_number];

        result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);
        if (result != VDO_SUCCESS)
                return result;

        result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
        if (result != VDO_SUCCESS) {
                vdo_int_map_free(zone->pbn_operations);
                return result;
        }

        zone->zone_number = zone_number;
        zone->thread_id = vdo->thread_config.physical_threads[zone_number];
        zone->allocator = &vdo->depot->allocators[zone_number];
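        /* Link each zone to its successor, forming a ring that allocation retries can walk. */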
        zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
        result = vdo_make_default_thread(vdo, zone->thread_id);
        if (result != VDO_SUCCESS) {
                free_pbn_lock_pool(vdo_forget(zone->lock_pool));
                vdo_int_map_free(zone->pbn_operations);
                return result;
        }
        return result;
}

/**
 * vdo_make_physical_zones() - Make the physical zones for a vdo.
 * @vdo: The vdo being constructed.
 * @zones_ptr: A pointer to hold the zones.
 *
 * Return: VDO_SUCCESS or an error code.
 */
int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr)
{
        struct physical_zones *zones;
        int result;
        zone_count_t zone_count = vdo->thread_config.physical_zone_count;

        if (zone_count == 0)
                return VDO_SUCCESS;

        result = vdo_allocate_extended(struct physical_zones, zone_count,
                                       struct physical_zone, __func__, &zones);
        if (result != VDO_SUCCESS)
                return result;

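        /*
         * zone_count doubles as the loop index: initialize_zone() reads it to pick the next
         * slot, and on failure vdo_free_physical_zones() tears down only the zones that were
         * fully initialized.
         */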
        for (zones->zone_count = 0; zones->zone_count < zone_count; zones->zone_count++) {
                result = initialize_zone(vdo, zones);
                if (result != VDO_SUCCESS) {
                        vdo_free_physical_zones(zones);
                        return result;
                }
        }

        *zones_ptr = zones;
        return VDO_SUCCESS;
}

/**
 * vdo_free_physical_zones() - Destroy the physical zones.
 * @zones: The zones to free.
 */
void vdo_free_physical_zones(struct physical_zones *zones)
{
        zone_count_t index;

        if (zones == NULL)
                return;

        for (index = 0; index < zones->zone_count; index++) {
                struct physical_zone *zone = &zones->zones[index];

                free_pbn_lock_pool(vdo_forget(zone->lock_pool));
                vdo_int_map_free(vdo_forget(zone->pbn_operations));
        }

        vdo_free(zones);
}

/**
 * vdo_get_physical_zone_pbn_lock() - Get the lock on a PBN if one exists.
 * @zone: The physical zone responsible for the PBN.
 * @pbn: The physical block number whose lock is desired.
 *
 * Return: The lock or NULL if the PBN is not locked.
 */
struct pbn_lock *vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
                                                physical_block_number_t pbn)
{
        return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));
}

/**
 * vdo_attempt_physical_zone_pbn_lock() - Attempt to lock a physical block in the zone responsible
 *                                        for it.
 * @zone: The physical zone responsible for the PBN.
 * @pbn: The physical block number to lock.
 * @type: The type with which to initialize a new lock.
 * @lock_ptr: A pointer to receive the lock, existing or new.
 *
 * If the PBN is already locked, the existing lock will be returned. Otherwise, a new lock instance
 * will be borrowed from the pool, initialized, and returned. The lock owner will be NULL for a new
 * lock acquired by the caller, who is responsible for setting that field promptly. The lock owner
 * will be non-NULL when there is already an existing lock on the PBN.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
                                       physical_block_number_t pbn,
                                       enum pbn_lock_type type,
                                       struct pbn_lock **lock_ptr)
{
        /*
         * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses in
         * the common case of no lock contention.
         */
        struct pbn_lock *lock, *new_lock = NULL;
        int result;

        result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
        if (result != VDO_SUCCESS) {
                VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
                return result;
        }

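        /*
         * Insert without updating: if the PBN is already mapped, the existing lock comes back
         * through 'lock' and the borrowed 'new_lock' is not stored in the map.
         */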
        result = vdo_int_map_put(zone->pbn_operations, pbn, new_lock, false,
                                 (void **) &lock);
        if (result != VDO_SUCCESS) {
                return_pbn_lock_to_pool(zone->lock_pool, new_lock);
                return result;
        }

        if (lock != NULL) {
                /* The lock is already held, so we don't need the borrowed one. */
                return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
                result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
                                    (unsigned long long) pbn);
                if (result != VDO_SUCCESS)
                        return result;
                *lock_ptr = lock;
        } else {
                *lock_ptr = new_lock;
        }
        return VDO_SUCCESS;
}

/**
 * allocate_and_lock_block() - Attempt to allocate a block from this zone.
 * @allocation: The struct allocation of the data_vio attempting to allocate.
 *
 * If a block is allocated, the recipient will also hold a lock on it.
 *
 * Return: VDO_SUCCESS if a block was allocated, or an error code.
 */
static int allocate_and_lock_block(struct allocation *allocation)
{
        int result;
        struct pbn_lock *lock;

        VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
                            "must not allocate a block while already holding a lock on one");

        result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
        if (result != VDO_SUCCESS)
                return result;

        result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn,
                                                    allocation->write_lock_type, &lock);
        if (result != VDO_SUCCESS)
                return result;

        if (lock->holder_count > 0) {
                /* This block is already locked, which should be impossible. */
                return vdo_log_error_strerror(VDO_LOCK_ERROR,
                                              "Newly allocated block %llu was spuriously locked (holder_count=%u)",
                                              (unsigned long long) allocation->pbn,
                                              lock->holder_count);
        }

        /* We've successfully acquired a new lock, so mark it as ours. */
        lock->holder_count += 1;
        allocation->lock = lock;
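        /* Make the lock responsible for releasing the block's provisional reference. */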
        vdo_assign_pbn_lock_provisional_reference(lock);
        return VDO_SUCCESS;
}

/**
 * retry_allocation() - Retry allocating a block now that we're done waiting for scrubbing.
 * @waiter: The allocating_vio that was waiting to allocate.
 * @context: The context (unused).
 */
static void retry_allocation(struct vdo_waiter *waiter, void __always_unused *context)
{
        struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);

        /* Now that some slab has scrubbed, restart the allocation process. */
        data_vio->allocation.wait_for_clean_slab = false;
        data_vio->allocation.first_allocation_zone = data_vio->allocation.zone->zone_number;
        continue_data_vio(data_vio);
}

/**
 * continue_allocating() - Continue searching for an allocation by enqueuing to wait for scrubbing
 *                         or switching to the next zone.
 * @data_vio: The data_vio attempting to get an allocation.
 *
 * This method should only be called from the error handler set in data_vio_allocate_data_block.
 *
 * Return: true if the allocation process has continued in another zone.
 */
static bool continue_allocating(struct data_vio *data_vio)
{
        struct allocation *allocation = &data_vio->allocation;
        struct physical_zone *zone = allocation->zone;
        struct vdo_completion *completion = &data_vio->vio.completion;
        int result = VDO_SUCCESS;
        bool was_waiting = allocation->wait_for_clean_slab;
        bool tried_all = (allocation->first_allocation_zone == zone->next->zone_number);

        vdo_reset_completion(completion);

        if (tried_all && !was_waiting) {
                /*
                 * We've already looked in all the zones, and found nothing. So go through the
                 * zones again, and wait for each to scrub before trying to allocate.
                 */
                allocation->wait_for_clean_slab = true;
                allocation->first_allocation_zone = zone->zone_number;
        }

        if (allocation->wait_for_clean_slab) {
                data_vio->waiter.callback = retry_allocation;
                result = vdo_enqueue_clean_slab_waiter(zone->allocator,
                                                       &data_vio->waiter);
                if (result == VDO_SUCCESS) {
                        /* We've enqueued to wait for a slab to be scrubbed. */
                        return true;
                }

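                /*
                 * Stop retrying on any error other than running out of space, or once we have
                 * already waited on scrubbing in every zone.
                 */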
                if ((result != VDO_NO_SPACE) || (was_waiting && tried_all)) {
                        vdo_set_completion_result(completion, result);
                        return false;
                }
        }

        allocation->zone = zone->next;
        completion->callback_thread_id = allocation->zone->thread_id;
        vdo_launch_completion(completion);
        return true;
}

/**
 * vdo_allocate_block_in_zone() - Attempt to allocate a block in the current physical zone, and if
 *                                that fails try the next if possible.
 * @data_vio: The data_vio needing an allocation.
 *
 * Return: True if a block was allocated. If not, the data_vio will have been dispatched, so the
 *         caller must not touch it.
 */
bool vdo_allocate_block_in_zone(struct data_vio *data_vio)
{
        int result = allocate_and_lock_block(&data_vio->allocation);

        if (result == VDO_SUCCESS)
                return true;

        if ((result != VDO_NO_SPACE) || !continue_allocating(data_vio))
                continue_data_vio_with_error(data_vio, result);

        return false;
}

/**
 * vdo_release_physical_zone_pbn_lock() - Release a physical block lock if it is held and return
 *                                        it to the lock pool.
 * @zone: The physical zone in which the lock was obtained.
 * @locked_pbn: The physical block number to unlock.
 * @lock: The lock being released.
 *
 * It must be the last live reference, as if the memory were being freed (the lock memory will be
 * re-initialized or zeroed).
 */
void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
                                        physical_block_number_t locked_pbn,
                                        struct pbn_lock *lock)
{
        struct pbn_lock *holder;

        if (lock == NULL)
                return;

        VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
                            "should not be releasing a lock that is not held");

        lock->holder_count -= 1;
        if (lock->holder_count > 0) {
                /* The lock was shared and is still referenced, so don't release it yet. */
                return;
        }

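        /* Remove the mapping and verify that the registered holder was this same lock. */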
        holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
        VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
                            (unsigned long long) locked_pbn);

        release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
        return_pbn_lock_to_pool(zone->lock_pool, lock);
}

/**
 * vdo_dump_physical_zone() - Dump information about a physical zone to the log for debugging.
 * @zone: The zone to dump.
 */
void vdo_dump_physical_zone(const struct physical_zone *zone)
{
        vdo_dump_block_allocator(zone->allocator);
}