Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drbd: Fix spelling

Found these with the help of ispell -l.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>

Authored by Bart Van Assche and committed by Philipp Reisner
24c4830c 9a0d9d03

+45 -45
+1 -1
drivers/block/drbd/drbd_actlog.c
··· 28 28 #include "drbd_int.h" 29 29 #include "drbd_wrappers.h" 30 30 31 - /* We maintain a trivial check sum in our on disk activity log. 31 + /* We maintain a trivial checksum in our on disk activity log. 32 32 * With that we can ensure correct operation even when the storage 33 33 * device might do a partial (last) sector write while losing power. 34 34 */
+3 -3
drivers/block/drbd/drbd_bitmap.c
··· 74 74 * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 75 75 * seems excessive. 76 76 * 77 - * We plan to reduce the amount of in-core bitmap pages by pageing them in 77 + * We plan to reduce the amount of in-core bitmap pages by paging them in 78 78 * and out against their on-disk location as necessary, but need to make 79 79 * sure we don't cause too much meta data IO, and must not deadlock in 80 80 * tight memory situations. This needs some more work. ··· 200 200 * we if bits have been cleared since last IO. */ 201 201 #define BM_PAGE_LAZY_WRITEOUT 28 202 202 203 - /* store_page_idx uses non-atomic assingment. It is only used directly after 203 + /* store_page_idx uses non-atomic assignment. It is only used directly after 204 204 * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 205 205 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 206 206 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit ··· 318 318 /* word offset from start of bitmap to word number _in_page_ 319 319 * modulo longs per page 320 320 #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 321 - hm, well, Philipp thinks gcc might not optimze the % into & (... - 1) 321 + hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) 322 322 so do it explicitly: 323 323 */ 324 324 #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
+3 -3
drivers/block/drbd/drbd_int.h
··· 699 699 * see drbd_endio_pri(). */ 700 700 struct bio *private_bio; 701 701 702 - struct hlist_node colision; 702 + struct hlist_node collision; 703 703 sector_t sector; 704 704 unsigned int size; 705 705 unsigned int epoch; /* barrier_nr */ ··· 765 765 766 766 struct drbd_epoch_entry { 767 767 struct drbd_work w; 768 - struct hlist_node colision; 768 + struct hlist_node collision; 769 769 struct drbd_epoch *epoch; /* for writes */ 770 770 struct drbd_conf *mdev; 771 771 struct page *pages; ··· 1520 1520 extern char *ppsize(char *buf, unsigned long long size); 1521 1521 extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); 1522 1522 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1523 - extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); 1523 + extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); 1524 1524 extern void resync_after_online_grow(struct drbd_conf *); 1525 1525 extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); 1526 1526 extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
+2 -2
drivers/block/drbd/drbd_main.c
··· 2732 2732 2733 2733 /* double check digest, sometimes buffers have been modified in flight. */ 2734 2734 if (dgs > 0 && dgs <= 64) { 2735 - /* 64 byte, 512 bit, is the larges digest size 2735 + /* 64 byte, 512 bit, is the largest digest size 2736 2736 * currently supported in kernel crypto. */ 2737 2737 unsigned char digest[64]; 2738 2738 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); ··· 3287 3287 3288 3288 drbd_release_ee_lists(mdev); 3289 3289 3290 - /* should be free'd on disconnect? */ 3290 + /* should be freed on disconnect? */ 3291 3291 kfree(mdev->ee_hash); 3292 3292 /* 3293 3293 mdev->ee_hash_s = 0;
+3 -3
drivers/block/drbd/drbd_nl.c
··· 596 596 * Returns 0 on success, negative return values indicate errors. 597 597 * You should call drbd_md_sync() after calling this function. 598 598 */ 599 - enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 599 + enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 600 600 { 601 601 sector_t prev_first_sect, prev_size; /* previous meta location */ 602 602 sector_t la_size; ··· 1205 1205 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1206 1206 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1207 1207 1208 - dd = drbd_determin_dev_size(mdev, 0); 1208 + dd = drbd_determine_dev_size(mdev, 0); 1209 1209 if (dd == dev_size_error) { 1210 1210 retcode = ERR_NOMEM_BITMAP; 1211 1211 goto force_diskless_dec; ··· 1719 1719 1720 1720 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1721 1721 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1722 - dd = drbd_determin_dev_size(mdev, ddsf); 1722 + dd = drbd_determine_dev_size(mdev, ddsf); 1723 1723 drbd_md_sync(mdev); 1724 1724 put_ldev(mdev); 1725 1725 if (dd == dev_size_error) {
+15 -15
drivers/block/drbd/drbd_receiver.c
··· 333 333 if (!page) 334 334 goto fail; 335 335 336 - INIT_HLIST_NODE(&e->colision); 336 + INIT_HLIST_NODE(&e->collision); 337 337 e->epoch = NULL; 338 338 e->mdev = mdev; 339 339 e->pages = page; ··· 356 356 kfree(e->digest); 357 357 drbd_pp_free(mdev, e->pages, is_net); 358 358 D_ASSERT(atomic_read(&e->pending_bios) == 0); 359 - D_ASSERT(hlist_unhashed(&e->colision)); 359 + D_ASSERT(hlist_unhashed(&e->collision)); 360 360 mempool_free(e, drbd_ee_mempool); 361 361 } 362 362 ··· 1413 1413 sector_t sector = e->sector; 1414 1414 int ok; 1415 1415 1416 - D_ASSERT(hlist_unhashed(&e->colision)); 1416 + D_ASSERT(hlist_unhashed(&e->collision)); 1417 1417 1418 1418 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1419 1419 drbd_set_in_sync(mdev, sector, e->size); ··· 1482 1482 return false; 1483 1483 } 1484 1484 1485 - /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid 1485 + /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid 1486 1486 * special casing it there for the various failure cases. 1487 1487 * still no race with drbd_fail_pending_reads */ 1488 1488 ok = recv_dless_read(mdev, req, sector, data_size); ··· 1553 1553 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 1554 1554 if (mdev->net_conf->two_primaries) { 1555 1555 spin_lock_irq(&mdev->req_lock); 1556 - D_ASSERT(!hlist_unhashed(&e->colision)); 1557 - hlist_del_init(&e->colision); 1556 + D_ASSERT(!hlist_unhashed(&e->collision)); 1557 + hlist_del_init(&e->collision); 1558 1558 spin_unlock_irq(&mdev->req_lock); 1559 1559 } else { 1560 - D_ASSERT(hlist_unhashed(&e->colision)); 1560 + D_ASSERT(hlist_unhashed(&e->collision)); 1561 1561 } 1562 1562 1563 1563 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? 
EV_CLEANUP : 0)); ··· 1574 1574 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); 1575 1575 1576 1576 spin_lock_irq(&mdev->req_lock); 1577 - D_ASSERT(!hlist_unhashed(&e->colision)); 1578 - hlist_del_init(&e->colision); 1577 + D_ASSERT(!hlist_unhashed(&e->collision)); 1578 + hlist_del_init(&e->collision); 1579 1579 spin_unlock_irq(&mdev->req_lock); 1580 1580 1581 1581 dec_unacked(mdev); ··· 1750 1750 1751 1751 spin_lock_irq(&mdev->req_lock); 1752 1752 1753 - hlist_add_head(&e->colision, ee_hash_slot(mdev, sector)); 1753 + hlist_add_head(&e->collision, ee_hash_slot(mdev, sector)); 1754 1754 1755 1755 #define OVERLAPS overlaps(i->sector, i->size, sector, size) 1756 1756 slot = tl_hash_slot(mdev, sector); ··· 1760 1760 int have_conflict = 0; 1761 1761 prepare_to_wait(&mdev->misc_wait, &wait, 1762 1762 TASK_INTERRUPTIBLE); 1763 - hlist_for_each_entry(i, n, slot, colision) { 1763 + hlist_for_each_entry(i, n, slot, collision) { 1764 1764 if (OVERLAPS) { 1765 1765 /* only ALERT on first iteration, 1766 1766 * we may be woken up early... 
*/ ··· 1799 1799 } 1800 1800 1801 1801 if (signal_pending(current)) { 1802 - hlist_del_init(&e->colision); 1802 + hlist_del_init(&e->collision); 1803 1803 1804 1804 spin_unlock_irq(&mdev->req_lock); 1805 1805 ··· 1857 1857 dev_err(DEV, "submit failed, triggering re-connect\n"); 1858 1858 spin_lock_irq(&mdev->req_lock); 1859 1859 list_del(&e->w.list); 1860 - hlist_del_init(&e->colision); 1860 + hlist_del_init(&e->collision); 1861 1861 spin_unlock_irq(&mdev->req_lock); 1862 1862 if (e->flags & EE_CALL_AL_COMPLETE_IO) 1863 1863 drbd_al_complete_io(mdev, e->sector); ··· 2988 2988 2989 2989 ddsf = be16_to_cpu(p->dds_flags); 2990 2990 if (get_ldev(mdev)) { 2991 - dd = drbd_determin_dev_size(mdev, ddsf); 2991 + dd = drbd_determine_dev_size(mdev, ddsf); 2992 2992 put_ldev(mdev); 2993 2993 if (dd == dev_size_error) 2994 2994 return false; ··· 4261 4261 struct hlist_node *n; 4262 4262 struct drbd_request *req; 4263 4263 4264 - hlist_for_each_entry(req, n, slot, colision) { 4264 + hlist_for_each_entry(req, n, slot, collision) { 4265 4265 if ((unsigned long)req == (unsigned long)id) { 4266 4266 if (req->sector != sector) { 4267 4267 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
+9 -9
drivers/block/drbd/drbd_req.c
··· 163 163 * they must have been failed on the spot */ 164 164 #define OVERLAPS overlaps(sector, size, i->sector, i->size) 165 165 slot = tl_hash_slot(mdev, sector); 166 - hlist_for_each_entry(i, n, slot, colision) { 166 + hlist_for_each_entry(i, n, slot, collision) { 167 167 if (OVERLAPS) { 168 168 dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; " 169 169 "other: %p %llus +%u\n", ··· 187 187 #undef OVERLAPS 188 188 #define OVERLAPS overlaps(sector, size, e->sector, e->size) 189 189 slot = ee_hash_slot(mdev, req->sector); 190 - hlist_for_each_entry(e, n, slot, colision) { 190 + hlist_for_each_entry(e, n, slot, collision) { 191 191 if (OVERLAPS) { 192 192 wake_up(&mdev->misc_wait); 193 193 break; ··· 260 260 261 261 /* remove the request from the conflict detection 262 262 * respective block_id verification hash */ 263 - if (!hlist_unhashed(&req->colision)) 264 - hlist_del(&req->colision); 263 + if (!hlist_unhashed(&req->collision)) 264 + hlist_del(&req->collision); 265 265 else 266 266 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); 267 267 ··· 329 329 struct hlist_node *n; 330 330 struct hlist_head *slot; 331 331 332 - D_ASSERT(hlist_unhashed(&req->colision)); 332 + D_ASSERT(hlist_unhashed(&req->collision)); 333 333 334 334 if (!get_net_conf(mdev)) 335 335 return 0; ··· 341 341 342 342 #define OVERLAPS overlaps(i->sector, i->size, sector, size) 343 343 slot = tl_hash_slot(mdev, sector); 344 - hlist_for_each_entry(i, n, slot, colision) { 344 + hlist_for_each_entry(i, n, slot, collision) { 345 345 if (OVERLAPS) { 346 346 dev_alert(DEV, "%s[%u] Concurrent local write detected! " 347 347 "[DISCARD L] new: %llus +%u; " ··· 359 359 #undef OVERLAPS 360 360 #define OVERLAPS overlaps(e->sector, e->size, sector, size) 361 361 slot = ee_hash_slot(mdev, sector); 362 - hlist_for_each_entry(e, n, slot, colision) { 362 + hlist_for_each_entry(e, n, slot, collision) { 363 363 if (OVERLAPS) { 364 364 dev_alert(DEV, "%s[%u] Concurrent remote write detected!" 
365 365 " [DISCARD L] new: %llus +%u; " ··· 491 491 492 492 /* so we can verify the handle in the answer packet 493 493 * corresponding hlist_del is in _req_may_be_done() */ 494 - hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector)); 494 + hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector)); 495 495 496 496 set_bit(UNPLUG_REMOTE, &mdev->flags); 497 497 ··· 507 507 /* assert something? */ 508 508 /* from drbd_make_request_common only */ 509 509 510 - hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector)); 510 + hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector)); 511 511 /* corresponding hlist_del is in _req_may_be_done() */ 512 512 513 513 /* NOTE
+2 -2
drivers/block/drbd/drbd_req.h
··· 256 256 struct hlist_node *n; 257 257 struct drbd_request *req; 258 258 259 - hlist_for_each_entry(req, n, slot, colision) { 259 + hlist_for_each_entry(req, n, slot, collision) { 260 260 if ((unsigned long)req == (unsigned long)id) { 261 261 D_ASSERT(req->sector == sector); 262 262 return req; ··· 291 291 req->epoch = 0; 292 292 req->sector = bio_src->bi_sector; 293 293 req->size = bio_src->bi_size; 294 - INIT_HLIST_NODE(&req->colision); 294 + INIT_HLIST_NODE(&req->collision); 295 295 INIT_LIST_HEAD(&req->tl_requests); 296 296 INIT_LIST_HEAD(&req->w.list); 297 297 }
+2 -2
drivers/block/drbd/drbd_worker.c
··· 126 126 list_del(&e->w.list); /* has been on active_ee or sync_ee */ 127 127 list_add_tail(&e->w.list, &mdev->done_ee); 128 128 129 - /* No hlist_del_init(&e->colision) here, we did not send the Ack yet, 129 + /* No hlist_del_init(&e->collision) here, we did not send the Ack yet, 130 130 * neither did we wake possibly waiting conflicting requests. 131 131 * done from "drbd_process_done_ee" within the appropriate w.cb 132 132 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */ ··· 840 840 const int ratio = 841 841 (t == 0) ? 0 : 842 842 (t < 100000) ? ((s*100)/t) : (s/(t/100)); 843 - dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; " 843 + dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; " 844 844 "transferred %luK total %luK\n", 845 845 ratio, 846 846 Bit2KB(mdev->rs_same_csum),
+4 -4
include/linux/drbd.h
··· 38 38 39 39 /* Although the Linux source code makes a difference between 40 40 generic endianness and the bitfields' endianness, there is no 41 - architecture as of Linux-2.6.24-rc4 where the bitfileds' endianness 41 + architecture as of Linux-2.6.24-rc4 where the bitfields' endianness 42 42 does not match the generic endianness. */ 43 43 44 44 #if __BYTE_ORDER == __LITTLE_ENDIAN ··· 195 195 C_WF_REPORT_PARAMS, /* we have a socket */ 196 196 C_CONNECTED, /* we have introduced each other */ 197 197 C_STARTING_SYNC_S, /* starting full sync by admin request. */ 198 - C_STARTING_SYNC_T, /* stariing full sync by admin request. */ 198 + C_STARTING_SYNC_T, /* starting full sync by admin request. */ 199 199 C_WF_BITMAP_S, 200 200 C_WF_BITMAP_T, 201 201 C_WF_SYNC_UUID, ··· 236 236 * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com> 237 237 * even though we transmit as "cpu_to_be32(state)", 238 238 * the offsets of the bitfields still need to be swapped 239 - * on different endianess. 239 + * on different endianness. 240 240 */ 241 241 struct { 242 242 #if defined(__LITTLE_ENDIAN_BITFIELD) ··· 266 266 unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ 267 267 unsigned role:2 ; /* 3/4 primary/secondary/unknown */ 268 268 #else 269 - # error "this endianess is not supported" 269 + # error "this endianness is not supported" 270 270 #endif 271 271 }; 272 272 unsigned int i;
+1 -1
include/linux/drbd_tag_magic.h
··· 30 30 int tag_and_len ## member; 31 31 #include "linux/drbd_nl.h" 32 32 33 - /* declate tag-list-sizes */ 33 + /* declare tag-list-sizes */ 34 34 static const int tag_list_sizes[] = { 35 35 #define NL_PACKET(name, number, fields) 2 fields , 36 36 #define NL_INTEGER(pn, pr, member) + 4 + 4