Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client

Pull Ceph changes from Alex Elder:
"This is a big pull.

Most of it is the culmination of Alex's work to implement RBD image
layering, which is now complete (yay!).

There is also some work from Yan to fix i_mutex behavior surrounding
writes in cephfs, a sync write fix, a fix for RBD images that get
resized while they are mapped, and a few patches from me that resolve
annoying auth warnings and fix several bugs in the ceph auth code."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client: (254 commits)
rbd: fix image request leak on parent read
libceph: use slab cache for osd client requests
libceph: allocate ceph message data with a slab allocator
libceph: allocate ceph messages with a slab allocator
rbd: allocate image object names with a slab allocator
rbd: allocate object requests with a slab allocator
rbd: allocate name separate from obj_request
rbd: allocate image requests with a slab allocator
rbd: use binary search for snapshot lookup
rbd: clear EXISTS flag if mapped snapshot disappears
rbd: kill off the snapshot list
rbd: define rbd_snap_size() and rbd_snap_features()
rbd: use snap_id not index to look up snap info
rbd: look up snapshot name in names buffer
rbd: drop obj_request->version
rbd: drop rbd_obj_method_sync() version parameter
rbd: more version parameter removal
rbd: get rid of some version parameters
rbd: stop tracking header object version
rbd: snap names are pointer to constant data
...

+4238 -2322
-20
Documentation/ABI/testing/sysfs-bus-rbd
··· 66 66 67 67 The current snapshot for which the device is mapped. 68 68 69 - snap_* 70 - 71 - A directory per each snapshot 72 - 73 69 parent 74 70 75 71 Information identifying the pool, image, and snapshot id for 76 72 the parent image in a layered rbd image (format 2 only). 77 - 78 - Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name> 79 - ------------------------------------------------------------- 80 - 81 - snap_id 82 - 83 - The rados internal snapshot id assigned for this snapshot 84 - 85 - snap_size 86 - 87 - The size of the image when this snapshot was taken. 88 - 89 - snap_features 90 - 91 - A hexadecimal encoding of the feature bits for this snapshot. 92 -
+1854 -1044
drivers/block/rbd.c
··· 1 + 1 2 /* 2 3 rbd.c -- Export ceph rados objects as a Linux block device 3 4 ··· 33 32 #include <linux/ceph/mon_client.h> 34 33 #include <linux/ceph/decode.h> 35 34 #include <linux/parser.h> 35 + #include <linux/bsearch.h> 36 36 37 37 #include <linux/kernel.h> 38 38 #include <linux/device.h> 39 39 #include <linux/module.h> 40 40 #include <linux/fs.h> 41 41 #include <linux/blkdev.h> 42 + #include <linux/slab.h> 42 43 43 44 #include "rbd_types.h" 44 45 ··· 55 52 #define SECTOR_SHIFT 9 56 53 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) 57 54 58 - /* It might be useful to have these defined elsewhere */ 59 - 60 - #define U8_MAX ((u8) (~0U)) 61 - #define U16_MAX ((u16) (~0U)) 62 - #define U32_MAX ((u32) (~0U)) 63 - #define U64_MAX ((u64) (~0ULL)) 64 - 65 55 #define RBD_DRV_NAME "rbd" 66 56 #define RBD_DRV_NAME_LONG "rbd (rados block device)" 67 57 ··· 68 72 69 73 #define RBD_SNAP_HEAD_NAME "-" 70 74 75 + #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */ 76 + 71 77 /* This allows a single page to hold an image name sent by OSD */ 72 78 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1) 73 79 #define RBD_IMAGE_ID_LEN_MAX 64 ··· 78 80 79 81 /* Feature bits */ 80 82 81 - #define RBD_FEATURE_LAYERING 1 83 + #define RBD_FEATURE_LAYERING (1<<0) 84 + #define RBD_FEATURE_STRIPINGV2 (1<<1) 85 + #define RBD_FEATURES_ALL \ 86 + (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2) 82 87 83 88 /* Features supported by this (client software) implementation. 
*/ 84 89 85 - #define RBD_FEATURES_ALL (0) 90 + #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL) 86 91 87 92 /* 88 93 * An RBD device name will be "rbd#", where the "rbd" comes from ··· 113 112 char *snap_names; 114 113 u64 *snap_sizes; 115 114 116 - u64 obj_version; 115 + u64 stripe_unit; 116 + u64 stripe_count; 117 117 }; 118 118 119 119 /* ··· 144 142 */ 145 143 struct rbd_spec { 146 144 u64 pool_id; 147 - char *pool_name; 145 + const char *pool_name; 148 146 149 - char *image_id; 150 - char *image_name; 147 + const char *image_id; 148 + const char *image_name; 151 149 152 150 u64 snap_id; 153 - char *snap_name; 151 + const char *snap_name; 154 152 155 153 struct kref kref; 156 154 }; ··· 176 174 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES 177 175 }; 178 176 177 + enum obj_req_flags { 178 + OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */ 179 + OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */ 180 + OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */ 181 + OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */ 182 + }; 183 + 179 184 struct rbd_obj_request { 180 185 const char *object_name; 181 186 u64 offset; /* object start byte */ 182 187 u64 length; /* bytes from offset */ 188 + unsigned long flags; 183 189 184 - struct rbd_img_request *img_request; 185 - struct list_head links; /* img_request->obj_requests */ 190 + /* 191 + * An object request associated with an image will have its 192 + * img_data flag set; a standalone object request will not. 193 + * 194 + * A standalone object request will have which == BAD_WHICH 195 + * and a null obj_request pointer. 196 + * 197 + * An object request initiated in support of a layered image 198 + * object (to check for its existence before a write) will 199 + * have which == BAD_WHICH and a non-null obj_request pointer. 200 + * 201 + * Finally, an object request for rbd image data will have 202 + * which != BAD_WHICH, and will have a non-null img_request 203 + * pointer. 
The value of which will be in the range 204 + * 0..(img_request->obj_request_count-1). 205 + */ 206 + union { 207 + struct rbd_obj_request *obj_request; /* STAT op */ 208 + struct { 209 + struct rbd_img_request *img_request; 210 + u64 img_offset; 211 + /* links for img_request->obj_requests list */ 212 + struct list_head links; 213 + }; 214 + }; 186 215 u32 which; /* posn image request list */ 187 216 188 217 enum obj_request_type type; ··· 224 191 u32 page_count; 225 192 }; 226 193 }; 194 + struct page **copyup_pages; 227 195 228 196 struct ceph_osd_request *osd_req; 229 197 230 198 u64 xferred; /* bytes transferred */ 231 - u64 version; 232 199 int result; 233 - atomic_t done; 234 200 235 201 rbd_obj_callback_t callback; 236 202 struct completion completion; ··· 237 205 struct kref kref; 238 206 }; 239 207 208 + enum img_req_flags { 209 + IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */ 210 + IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */ 211 + IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */ 212 + }; 213 + 240 214 struct rbd_img_request { 241 - struct request *rq; 242 215 struct rbd_device *rbd_dev; 243 216 u64 offset; /* starting image byte offset */ 244 217 u64 length; /* byte count from offset */ 245 - bool write_request; /* false for read */ 218 + unsigned long flags; 246 219 union { 220 + u64 snap_id; /* for reads */ 247 221 struct ceph_snap_context *snapc; /* for writes */ 248 - u64 snap_id; /* for reads */ 249 222 }; 223 + union { 224 + struct request *rq; /* block request */ 225 + struct rbd_obj_request *obj_request; /* obj req initiator */ 226 + }; 227 + struct page **copyup_pages; 250 228 spinlock_t completion_lock;/* protects next_completion */ 251 229 u32 next_completion; 252 230 rbd_img_callback_t callback; 231 + u64 xferred;/* aggregate bytes transferred */ 232 + int result; /* first nonzero obj_request result */ 253 233 254 234 u32 obj_request_count; 255 235 struct list_head obj_requests; /* rbd_obj_request 
structs */ ··· 275 231 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links) 276 232 #define for_each_obj_request_safe(ireq, oreq, n) \ 277 233 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links) 278 - 279 - struct rbd_snap { 280 - struct device dev; 281 - const char *name; 282 - u64 size; 283 - struct list_head node; 284 - u64 id; 285 - u64 features; 286 - }; 287 234 288 235 struct rbd_mapping { 289 236 u64 size; ··· 311 276 312 277 struct rbd_spec *parent_spec; 313 278 u64 parent_overlap; 279 + struct rbd_device *parent; 314 280 315 281 /* protects updating the header */ 316 282 struct rw_semaphore header_rwsem; ··· 319 283 struct rbd_mapping mapping; 320 284 321 285 struct list_head node; 322 - 323 - /* list of snapshots */ 324 - struct list_head snaps; 325 286 326 287 /* sysfs related */ 327 288 struct device dev; ··· 345 312 static LIST_HEAD(rbd_client_list); /* clients */ 346 313 static DEFINE_SPINLOCK(rbd_client_list_lock); 347 314 348 - static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); 349 - static int rbd_dev_snaps_register(struct rbd_device *rbd_dev); 315 + /* Slab caches for frequently-allocated structures */ 350 316 351 - static void rbd_dev_release(struct device *dev); 352 - static void rbd_remove_snap_dev(struct rbd_snap *snap); 317 + static struct kmem_cache *rbd_img_request_cache; 318 + static struct kmem_cache *rbd_obj_request_cache; 319 + static struct kmem_cache *rbd_segment_name_cache; 320 + 321 + static int rbd_img_request_submit(struct rbd_img_request *img_request); 322 + 323 + static void rbd_dev_device_release(struct device *dev); 353 324 354 325 static ssize_t rbd_add(struct bus_type *bus, const char *buf, 355 326 size_t count); 356 327 static ssize_t rbd_remove(struct bus_type *bus, const char *buf, 357 328 size_t count); 329 + static int rbd_dev_image_probe(struct rbd_device *rbd_dev); 358 330 359 331 static struct bus_attribute rbd_bus_attrs[] = { 360 332 __ATTR(add, S_IWUSR, NULL, rbd_add), ··· 421 
383 # define rbd_assert(expr) ((void) 0) 422 384 #endif /* !RBD_DEBUG */ 423 385 424 - static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver); 425 - static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver); 386 + static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); 387 + static void rbd_img_parent_read(struct rbd_obj_request *obj_request); 388 + static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 389 + 390 + static int rbd_dev_refresh(struct rbd_device *rbd_dev); 391 + static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); 392 + static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 393 + u64 snap_id); 394 + static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 395 + u8 *order, u64 *snap_size); 396 + static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 397 + u64 *snap_features); 398 + static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name); 426 399 427 400 static int rbd_open(struct block_device *bdev, fmode_t mode) 428 401 { ··· 533 484 return ERR_PTR(ret); 534 485 } 535 486 487 + static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc) 488 + { 489 + kref_get(&rbdc->kref); 490 + 491 + return rbdc; 492 + } 493 + 536 494 /* 537 495 * Find a ceph client with specific addr and configuration. If 538 496 * found, bump its reference count. 
··· 555 499 spin_lock(&rbd_client_list_lock); 556 500 list_for_each_entry(client_node, &rbd_client_list, node) { 557 501 if (!ceph_compare_options(ceph_opts, client_node->client)) { 558 - kref_get(&client_node->kref); 502 + __rbd_get_client(client_node); 503 + 559 504 found = true; 560 505 break; 561 506 } ··· 779 722 header->snap_sizes[i] = 780 723 le64_to_cpu(ondisk->snaps[i].image_size); 781 724 } else { 782 - WARN_ON(ondisk->snap_names_len); 783 725 header->snap_names = NULL; 784 726 header->snap_sizes = NULL; 785 727 } ··· 791 735 /* Allocate and fill in the snapshot context */ 792 736 793 737 header->image_size = le64_to_cpu(ondisk->image_size); 794 - size = sizeof (struct ceph_snap_context); 795 - size += snap_count * sizeof (header->snapc->snaps[0]); 796 - header->snapc = kzalloc(size, GFP_KERNEL); 738 + 739 + header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 797 740 if (!header->snapc) 798 741 goto out_err; 799 - 800 - atomic_set(&header->snapc->nref, 1); 801 742 header->snapc->seq = le64_to_cpu(ondisk->snap_seq); 802 - header->snapc->num_snaps = snap_count; 803 743 for (i = 0; i < snap_count; i++) 804 - header->snapc->snaps[i] = 805 - le64_to_cpu(ondisk->snaps[i].id); 744 + header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); 806 745 807 746 return 0; 808 747 ··· 812 761 return -ENOMEM; 813 762 } 814 763 764 + static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) 765 + { 766 + const char *snap_name; 767 + 768 + rbd_assert(which < rbd_dev->header.snapc->num_snaps); 769 + 770 + /* Skip over names until we find the one we are looking for */ 771 + 772 + snap_name = rbd_dev->header.snap_names; 773 + while (which--) 774 + snap_name += strlen(snap_name) + 1; 775 + 776 + return kstrdup(snap_name, GFP_KERNEL); 777 + } 778 + 779 + /* 780 + * Snapshot id comparison function for use with qsort()/bsearch(). 781 + * Note that result is for snapshots in *descending* order. 
782 + */ 783 + static int snapid_compare_reverse(const void *s1, const void *s2) 784 + { 785 + u64 snap_id1 = *(u64 *)s1; 786 + u64 snap_id2 = *(u64 *)s2; 787 + 788 + if (snap_id1 < snap_id2) 789 + return 1; 790 + return snap_id1 == snap_id2 ? 0 : -1; 791 + } 792 + 793 + /* 794 + * Search a snapshot context to see if the given snapshot id is 795 + * present. 796 + * 797 + * Returns the position of the snapshot id in the array if it's found, 798 + * or BAD_SNAP_INDEX otherwise. 799 + * 800 + * Note: The snapshot array is in kept sorted (by the osd) in 801 + * reverse order, highest snapshot id first. 802 + */ 803 + static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) 804 + { 805 + struct ceph_snap_context *snapc = rbd_dev->header.snapc; 806 + u64 *found; 807 + 808 + found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps, 809 + sizeof (snap_id), snapid_compare_reverse); 810 + 811 + return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX; 812 + } 813 + 814 + static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, 815 + u64 snap_id) 816 + { 817 + u32 which; 818 + 819 + which = rbd_dev_snap_index(rbd_dev, snap_id); 820 + if (which == BAD_SNAP_INDEX) 821 + return NULL; 822 + 823 + return _rbd_dev_v1_snap_name(rbd_dev, which); 824 + } 825 + 815 826 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) 816 827 { 817 - struct rbd_snap *snap; 818 - 819 828 if (snap_id == CEPH_NOSNAP) 820 829 return RBD_SNAP_HEAD_NAME; 821 830 822 - list_for_each_entry(snap, &rbd_dev->snaps, node) 823 - if (snap_id == snap->id) 824 - return snap->name; 831 + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 832 + if (rbd_dev->image_format == 1) 833 + return rbd_dev_v1_snap_name(rbd_dev, snap_id); 825 834 826 - return NULL; 835 + return rbd_dev_v2_snap_name(rbd_dev, snap_id); 827 836 } 828 837 829 - static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name) 838 + static int rbd_snap_size(struct rbd_device 
*rbd_dev, u64 snap_id, 839 + u64 *snap_size) 830 840 { 841 + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 842 + if (snap_id == CEPH_NOSNAP) { 843 + *snap_size = rbd_dev->header.image_size; 844 + } else if (rbd_dev->image_format == 1) { 845 + u32 which; 831 846 832 - struct rbd_snap *snap; 847 + which = rbd_dev_snap_index(rbd_dev, snap_id); 848 + if (which == BAD_SNAP_INDEX) 849 + return -ENOENT; 833 850 834 - list_for_each_entry(snap, &rbd_dev->snaps, node) { 835 - if (!strcmp(snap_name, snap->name)) { 836 - rbd_dev->spec->snap_id = snap->id; 837 - rbd_dev->mapping.size = snap->size; 838 - rbd_dev->mapping.features = snap->features; 851 + *snap_size = rbd_dev->header.snap_sizes[which]; 852 + } else { 853 + u64 size = 0; 854 + int ret; 839 855 840 - return 0; 841 - } 856 + ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); 857 + if (ret) 858 + return ret; 859 + 860 + *snap_size = size; 842 861 } 843 - 844 - return -ENOENT; 862 + return 0; 845 863 } 846 864 847 - static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) 865 + static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 866 + u64 *snap_features) 848 867 { 868 + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 869 + if (snap_id == CEPH_NOSNAP) { 870 + *snap_features = rbd_dev->header.features; 871 + } else if (rbd_dev->image_format == 1) { 872 + *snap_features = 0; /* No features for format 1 */ 873 + } else { 874 + u64 features = 0; 875 + int ret; 876 + 877 + ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features); 878 + if (ret) 879 + return ret; 880 + 881 + *snap_features = features; 882 + } 883 + return 0; 884 + } 885 + 886 + static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) 887 + { 888 + const char *snap_name = rbd_dev->spec->snap_name; 889 + u64 snap_id; 890 + u64 size = 0; 891 + u64 features = 0; 849 892 int ret; 850 893 851 - if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME, 852 - sizeof (RBD_SNAP_HEAD_NAME))) { 853 - 
rbd_dev->spec->snap_id = CEPH_NOSNAP; 854 - rbd_dev->mapping.size = rbd_dev->header.image_size; 855 - rbd_dev->mapping.features = rbd_dev->header.features; 856 - ret = 0; 894 + if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) { 895 + snap_id = rbd_snap_id_by_name(rbd_dev, snap_name); 896 + if (snap_id == CEPH_NOSNAP) 897 + return -ENOENT; 857 898 } else { 858 - ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); 859 - if (ret < 0) 860 - goto done; 861 - rbd_dev->mapping.read_only = true; 899 + snap_id = CEPH_NOSNAP; 862 900 } 863 - set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 864 901 865 - done: 866 - return ret; 902 + ret = rbd_snap_size(rbd_dev, snap_id, &size); 903 + if (ret) 904 + return ret; 905 + ret = rbd_snap_features(rbd_dev, snap_id, &features); 906 + if (ret) 907 + return ret; 908 + 909 + rbd_dev->mapping.size = size; 910 + rbd_dev->mapping.features = features; 911 + 912 + /* If we are mapping a snapshot it must be marked read-only */ 913 + 914 + if (snap_id != CEPH_NOSNAP) 915 + rbd_dev->mapping.read_only = true; 916 + 917 + return 0; 867 918 } 868 919 869 - static void rbd_header_free(struct rbd_image_header *header) 920 + static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) 870 921 { 871 - kfree(header->object_prefix); 872 - header->object_prefix = NULL; 873 - kfree(header->snap_sizes); 874 - header->snap_sizes = NULL; 875 - kfree(header->snap_names); 876 - header->snap_names = NULL; 877 - ceph_put_snap_context(header->snapc); 878 - header->snapc = NULL; 922 + rbd_dev->mapping.size = 0; 923 + rbd_dev->mapping.features = 0; 924 + rbd_dev->mapping.read_only = true; 925 + } 926 + 927 + static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) 928 + { 929 + rbd_dev->mapping.size = 0; 930 + rbd_dev->mapping.features = 0; 931 + rbd_dev->mapping.read_only = true; 879 932 } 880 933 881 934 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) ··· 988 833 u64 segment; 989 834 int ret; 990 835 991 - name = 
kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO); 836 + name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); 992 837 if (!name) 993 838 return NULL; 994 839 segment = offset >> rbd_dev->header.obj_order; ··· 1002 847 } 1003 848 1004 849 return name; 850 + } 851 + 852 + static void rbd_segment_name_free(const char *name) 853 + { 854 + /* The explicit cast here is needed to drop the const qualifier */ 855 + 856 + kmem_cache_free(rbd_segment_name_cache, (void *)name); 1005 857 } 1006 858 1007 859 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) ··· 1079 917 } 1080 918 1081 919 chain = chain->bi_next; 920 + } 921 + } 922 + 923 + /* 924 + * similar to zero_bio_chain(), zeros data defined by a page array, 925 + * starting at the given byte offset from the start of the array and 926 + * continuing up to the given end offset. The pages array is 927 + * assumed to be big enough to hold all bytes up to the end. 928 + */ 929 + static void zero_pages(struct page **pages, u64 offset, u64 end) 930 + { 931 + struct page **page = &pages[offset >> PAGE_SHIFT]; 932 + 933 + rbd_assert(end > offset); 934 + rbd_assert(end - offset <= (u64)SIZE_MAX); 935 + while (offset < end) { 936 + size_t page_offset; 937 + size_t length; 938 + unsigned long flags; 939 + void *kaddr; 940 + 941 + page_offset = (size_t)(offset & ~PAGE_MASK); 942 + length = min(PAGE_SIZE - page_offset, (size_t)(end - offset)); 943 + local_irq_save(flags); 944 + kaddr = kmap_atomic(*page); 945 + memset(kaddr + page_offset, 0, length); 946 + kunmap_atomic(kaddr); 947 + local_irq_restore(flags); 948 + 949 + offset += length; 950 + page++; 1082 951 } 1083 952 } 1084 953 ··· 1257 1064 return NULL; 1258 1065 } 1259 1066 1067 + /* 1068 + * The default/initial value for all object request flags is 0. For 1069 + * each flag, once its value is set to 1 it is never reset to 0 1070 + * again. 
1071 + */ 1072 + static void obj_request_img_data_set(struct rbd_obj_request *obj_request) 1073 + { 1074 + if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) { 1075 + struct rbd_device *rbd_dev; 1076 + 1077 + rbd_dev = obj_request->img_request->rbd_dev; 1078 + rbd_warn(rbd_dev, "obj_request %p already marked img_data\n", 1079 + obj_request); 1080 + } 1081 + } 1082 + 1083 + static bool obj_request_img_data_test(struct rbd_obj_request *obj_request) 1084 + { 1085 + smp_mb(); 1086 + return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0; 1087 + } 1088 + 1089 + static void obj_request_done_set(struct rbd_obj_request *obj_request) 1090 + { 1091 + if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) { 1092 + struct rbd_device *rbd_dev = NULL; 1093 + 1094 + if (obj_request_img_data_test(obj_request)) 1095 + rbd_dev = obj_request->img_request->rbd_dev; 1096 + rbd_warn(rbd_dev, "obj_request %p already marked done\n", 1097 + obj_request); 1098 + } 1099 + } 1100 + 1101 + static bool obj_request_done_test(struct rbd_obj_request *obj_request) 1102 + { 1103 + smp_mb(); 1104 + return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; 1105 + } 1106 + 1107 + /* 1108 + * This sets the KNOWN flag after (possibly) setting the EXISTS 1109 + * flag. The latter is set based on the "exists" value provided. 1110 + * 1111 + * Note that for our purposes once an object exists it never goes 1112 + * away again. It's possible that the response from two existence 1113 + * checks are separated by the creation of the target object, and 1114 + * the first ("doesn't exist") response arrives *after* the second 1115 + * ("does exist"). In that case we ignore the second one. 
1116 + */ 1117 + static void obj_request_existence_set(struct rbd_obj_request *obj_request, 1118 + bool exists) 1119 + { 1120 + if (exists) 1121 + set_bit(OBJ_REQ_EXISTS, &obj_request->flags); 1122 + set_bit(OBJ_REQ_KNOWN, &obj_request->flags); 1123 + smp_mb(); 1124 + } 1125 + 1126 + static bool obj_request_known_test(struct rbd_obj_request *obj_request) 1127 + { 1128 + smp_mb(); 1129 + return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0; 1130 + } 1131 + 1132 + static bool obj_request_exists_test(struct rbd_obj_request *obj_request) 1133 + { 1134 + smp_mb(); 1135 + return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; 1136 + } 1137 + 1260 1138 static void rbd_obj_request_get(struct rbd_obj_request *obj_request) 1261 1139 { 1262 1140 dout("%s: obj %p (was %d)\n", __func__, obj_request, ··· 1365 1101 { 1366 1102 rbd_assert(obj_request->img_request == NULL); 1367 1103 1368 - rbd_obj_request_get(obj_request); 1104 + /* Image request now owns object's original reference */ 1369 1105 obj_request->img_request = img_request; 1370 1106 obj_request->which = img_request->obj_request_count; 1107 + rbd_assert(!obj_request_img_data_test(obj_request)); 1108 + obj_request_img_data_set(obj_request); 1371 1109 rbd_assert(obj_request->which != BAD_WHICH); 1372 1110 img_request->obj_request_count++; 1373 1111 list_add_tail(&obj_request->links, &img_request->obj_requests); ··· 1389 1123 img_request->obj_request_count--; 1390 1124 rbd_assert(obj_request->which == img_request->obj_request_count); 1391 1125 obj_request->which = BAD_WHICH; 1126 + rbd_assert(obj_request_img_data_test(obj_request)); 1392 1127 rbd_assert(obj_request->img_request == img_request); 1393 1128 obj_request->img_request = NULL; 1394 1129 obj_request->callback = NULL; ··· 1408 1141 } 1409 1142 } 1410 1143 1411 - static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...) 
1412 - { 1413 - struct ceph_osd_req_op *op; 1414 - va_list args; 1415 - size_t size; 1416 - 1417 - op = kzalloc(sizeof (*op), GFP_NOIO); 1418 - if (!op) 1419 - return NULL; 1420 - op->op = opcode; 1421 - va_start(args, opcode); 1422 - switch (opcode) { 1423 - case CEPH_OSD_OP_READ: 1424 - case CEPH_OSD_OP_WRITE: 1425 - /* rbd_osd_req_op_create(READ, offset, length) */ 1426 - /* rbd_osd_req_op_create(WRITE, offset, length) */ 1427 - op->extent.offset = va_arg(args, u64); 1428 - op->extent.length = va_arg(args, u64); 1429 - if (opcode == CEPH_OSD_OP_WRITE) 1430 - op->payload_len = op->extent.length; 1431 - break; 1432 - case CEPH_OSD_OP_STAT: 1433 - break; 1434 - case CEPH_OSD_OP_CALL: 1435 - /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */ 1436 - op->cls.class_name = va_arg(args, char *); 1437 - size = strlen(op->cls.class_name); 1438 - rbd_assert(size <= (size_t) U8_MAX); 1439 - op->cls.class_len = size; 1440 - op->payload_len = size; 1441 - 1442 - op->cls.method_name = va_arg(args, char *); 1443 - size = strlen(op->cls.method_name); 1444 - rbd_assert(size <= (size_t) U8_MAX); 1445 - op->cls.method_len = size; 1446 - op->payload_len += size; 1447 - 1448 - op->cls.argc = 0; 1449 - op->cls.indata = va_arg(args, void *); 1450 - size = va_arg(args, size_t); 1451 - rbd_assert(size <= (size_t) U32_MAX); 1452 - op->cls.indata_len = (u32) size; 1453 - op->payload_len += size; 1454 - break; 1455 - case CEPH_OSD_OP_NOTIFY_ACK: 1456 - case CEPH_OSD_OP_WATCH: 1457 - /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */ 1458 - /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */ 1459 - op->watch.cookie = va_arg(args, u64); 1460 - op->watch.ver = va_arg(args, u64); 1461 - op->watch.ver = cpu_to_le64(op->watch.ver); 1462 - if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int)) 1463 - op->watch.flag = (u8) 1; 1464 - break; 1465 - default: 1466 - rbd_warn(NULL, "unsupported opcode %hu\n", opcode); 1467 - kfree(op); 1468 - op = NULL; 1469 - break; 1470 - } 1471 
- va_end(args); 1472 - 1473 - return op; 1474 - } 1475 - 1476 - static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op) 1477 - { 1478 - kfree(op); 1479 - } 1480 - 1481 1144 static int rbd_obj_request_submit(struct ceph_osd_client *osdc, 1482 1145 struct rbd_obj_request *obj_request) 1483 1146 { ··· 1418 1221 1419 1222 static void rbd_img_request_complete(struct rbd_img_request *img_request) 1420 1223 { 1224 + 1421 1225 dout("%s: img %p\n", __func__, img_request); 1226 + 1227 + /* 1228 + * If no error occurred, compute the aggregate transfer 1229 + * count for the image request. We could instead use 1230 + * atomic64_cmpxchg() to update it as each object request 1231 + * completes; not clear which way is better off hand. 1232 + */ 1233 + if (!img_request->result) { 1234 + struct rbd_obj_request *obj_request; 1235 + u64 xferred = 0; 1236 + 1237 + for_each_obj_request(img_request, obj_request) 1238 + xferred += obj_request->xferred; 1239 + img_request->xferred = xferred; 1240 + } 1241 + 1422 1242 if (img_request->callback) 1423 1243 img_request->callback(img_request); 1424 1244 else ··· 1451 1237 return wait_for_completion_interruptible(&obj_request->completion); 1452 1238 } 1453 1239 1454 - static void obj_request_done_init(struct rbd_obj_request *obj_request) 1240 + /* 1241 + * The default/initial value for all image request flags is 0. Each 1242 + * is conditionally set to 1 at image request initialization time 1243 + * and currently never change thereafter. 
1244 + */ 1245 + static void img_request_write_set(struct rbd_img_request *img_request) 1455 1246 { 1456 - atomic_set(&obj_request->done, 0); 1457 - smp_wmb(); 1247 + set_bit(IMG_REQ_WRITE, &img_request->flags); 1248 + smp_mb(); 1458 1249 } 1459 1250 1460 - static void obj_request_done_set(struct rbd_obj_request *obj_request) 1461 - { 1462 - int done; 1463 - 1464 - done = atomic_inc_return(&obj_request->done); 1465 - if (done > 1) { 1466 - struct rbd_img_request *img_request = obj_request->img_request; 1467 - struct rbd_device *rbd_dev; 1468 - 1469 - rbd_dev = img_request ? img_request->rbd_dev : NULL; 1470 - rbd_warn(rbd_dev, "obj_request %p was already done\n", 1471 - obj_request); 1472 - } 1473 - } 1474 - 1475 - static bool obj_request_done_test(struct rbd_obj_request *obj_request) 1251 + static bool img_request_write_test(struct rbd_img_request *img_request) 1476 1252 { 1477 1253 smp_mb(); 1478 - return atomic_read(&obj_request->done) != 0; 1254 + return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0; 1255 + } 1256 + 1257 + static void img_request_child_set(struct rbd_img_request *img_request) 1258 + { 1259 + set_bit(IMG_REQ_CHILD, &img_request->flags); 1260 + smp_mb(); 1261 + } 1262 + 1263 + static bool img_request_child_test(struct rbd_img_request *img_request) 1264 + { 1265 + smp_mb(); 1266 + return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0; 1267 + } 1268 + 1269 + static void img_request_layered_set(struct rbd_img_request *img_request) 1270 + { 1271 + set_bit(IMG_REQ_LAYERED, &img_request->flags); 1272 + smp_mb(); 1273 + } 1274 + 1275 + static bool img_request_layered_test(struct rbd_img_request *img_request) 1276 + { 1277 + smp_mb(); 1278 + return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0; 1479 1279 } 1480 1280 1481 1281 static void 1482 1282 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) 1483 1283 { 1284 + u64 xferred = obj_request->xferred; 1285 + u64 length = obj_request->length; 1286 + 1484 1287 dout("%s: obj 
%p img %p result %d %llu/%llu\n", __func__, 1485 1288 obj_request, obj_request->img_request, obj_request->result, 1486 - obj_request->xferred, obj_request->length); 1289 + xferred, length); 1487 1290 /* 1488 1291 * ENOENT means a hole in the image. We zero-fill the 1489 1292 * entire length of the request. A short read also implies ··· 1508 1277 * update the xferred count to indicate the whole request 1509 1278 * was satisfied. 1510 1279 */ 1511 - BUG_ON(obj_request->type != OBJ_REQUEST_BIO); 1280 + rbd_assert(obj_request->type != OBJ_REQUEST_NODATA); 1512 1281 if (obj_request->result == -ENOENT) { 1513 - zero_bio_chain(obj_request->bio_list, 0); 1282 + if (obj_request->type == OBJ_REQUEST_BIO) 1283 + zero_bio_chain(obj_request->bio_list, 0); 1284 + else 1285 + zero_pages(obj_request->pages, 0, length); 1514 1286 obj_request->result = 0; 1515 - obj_request->xferred = obj_request->length; 1516 - } else if (obj_request->xferred < obj_request->length && 1517 - !obj_request->result) { 1518 - zero_bio_chain(obj_request->bio_list, obj_request->xferred); 1519 - obj_request->xferred = obj_request->length; 1287 + obj_request->xferred = length; 1288 + } else if (xferred < length && !obj_request->result) { 1289 + if (obj_request->type == OBJ_REQUEST_BIO) 1290 + zero_bio_chain(obj_request->bio_list, xferred); 1291 + else 1292 + zero_pages(obj_request->pages, xferred, length); 1293 + obj_request->xferred = length; 1520 1294 } 1521 1295 obj_request_done_set(obj_request); 1522 1296 } ··· 1544 1308 1545 1309 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) 1546 1310 { 1547 - dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, 1548 - obj_request->result, obj_request->xferred, obj_request->length); 1549 - if (obj_request->img_request) 1311 + struct rbd_img_request *img_request = NULL; 1312 + struct rbd_device *rbd_dev = NULL; 1313 + bool layered = false; 1314 + 1315 + if (obj_request_img_data_test(obj_request)) { 1316 + img_request = 
obj_request->img_request; 1317 + layered = img_request && img_request_layered_test(img_request); 1318 + rbd_dev = img_request->rbd_dev; 1319 + } 1320 + 1321 + dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, 1322 + obj_request, img_request, obj_request->result, 1323 + obj_request->xferred, obj_request->length); 1324 + if (layered && obj_request->result == -ENOENT && 1325 + obj_request->img_offset < rbd_dev->parent_overlap) 1326 + rbd_img_parent_read(obj_request); 1327 + else if (img_request) 1550 1328 rbd_img_obj_request_read_callback(obj_request); 1551 1329 else 1552 1330 obj_request_done_set(obj_request); ··· 1571 1321 dout("%s: obj %p result %d %llu\n", __func__, obj_request, 1572 1322 obj_request->result, obj_request->length); 1573 1323 /* 1574 - * There is no such thing as a successful short write. 1575 - * Our xferred value is the number of bytes transferred 1576 - * back. Set it to our originally-requested length. 1324 + * There is no such thing as a successful short write. Set 1325 + * it to our originally-requested length. 
1577 1326 */ 1578 1327 obj_request->xferred = obj_request->length; 1579 1328 obj_request_done_set(obj_request); ··· 1596 1347 1597 1348 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg); 1598 1349 rbd_assert(osd_req == obj_request->osd_req); 1599 - rbd_assert(!!obj_request->img_request ^ 1600 - (obj_request->which == BAD_WHICH)); 1350 + if (obj_request_img_data_test(obj_request)) { 1351 + rbd_assert(obj_request->img_request); 1352 + rbd_assert(obj_request->which != BAD_WHICH); 1353 + } else { 1354 + rbd_assert(obj_request->which == BAD_WHICH); 1355 + } 1601 1356 1602 1357 if (osd_req->r_result < 0) 1603 1358 obj_request->result = osd_req->r_result; 1604 - obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version); 1605 1359 1606 - WARN_ON(osd_req->r_num_ops != 1); /* For now */ 1360 + BUG_ON(osd_req->r_num_ops > 2); 1607 1361 1608 1362 /* 1609 1363 * We support a 64-bit length, but ultimately it has to be 1610 1364 * passed to blk_end_request(), which takes an unsigned int. 1611 1365 */ 1612 1366 obj_request->xferred = osd_req->r_reply_op_len[0]; 1613 - rbd_assert(obj_request->xferred < (u64) UINT_MAX); 1614 - opcode = osd_req->r_request_ops[0].op; 1367 + rbd_assert(obj_request->xferred < (u64)UINT_MAX); 1368 + opcode = osd_req->r_ops[0].op; 1615 1369 switch (opcode) { 1616 1370 case CEPH_OSD_OP_READ: 1617 1371 rbd_osd_read_callback(obj_request); ··· 1640 1388 rbd_obj_request_complete(obj_request); 1641 1389 } 1642 1390 1391 + static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request) 1392 + { 1393 + struct rbd_img_request *img_request = obj_request->img_request; 1394 + struct ceph_osd_request *osd_req = obj_request->osd_req; 1395 + u64 snap_id; 1396 + 1397 + rbd_assert(osd_req != NULL); 1398 + 1399 + snap_id = img_request ? 
img_request->snap_id : CEPH_NOSNAP; 1400 + ceph_osdc_build_request(osd_req, obj_request->offset, 1401 + NULL, snap_id, NULL); 1402 + } 1403 + 1404 + static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) 1405 + { 1406 + struct rbd_img_request *img_request = obj_request->img_request; 1407 + struct ceph_osd_request *osd_req = obj_request->osd_req; 1408 + struct ceph_snap_context *snapc; 1409 + struct timespec mtime = CURRENT_TIME; 1410 + 1411 + rbd_assert(osd_req != NULL); 1412 + 1413 + snapc = img_request ? img_request->snapc : NULL; 1414 + ceph_osdc_build_request(osd_req, obj_request->offset, 1415 + snapc, CEPH_NOSNAP, &mtime); 1416 + } 1417 + 1643 1418 static struct ceph_osd_request *rbd_osd_req_create( 1644 1419 struct rbd_device *rbd_dev, 1645 1420 bool write_request, 1646 - struct rbd_obj_request *obj_request, 1647 - struct ceph_osd_req_op *op) 1421 + struct rbd_obj_request *obj_request) 1648 1422 { 1649 - struct rbd_img_request *img_request = obj_request->img_request; 1650 1423 struct ceph_snap_context *snapc = NULL; 1651 1424 struct ceph_osd_client *osdc; 1652 1425 struct ceph_osd_request *osd_req; 1653 - struct timespec now; 1654 - struct timespec *mtime; 1655 - u64 snap_id = CEPH_NOSNAP; 1656 - u64 offset = obj_request->offset; 1657 - u64 length = obj_request->length; 1658 1426 1659 - if (img_request) { 1660 - rbd_assert(img_request->write_request == write_request); 1661 - if (img_request->write_request) 1427 + if (obj_request_img_data_test(obj_request)) { 1428 + struct rbd_img_request *img_request = obj_request->img_request; 1429 + 1430 + rbd_assert(write_request == 1431 + img_request_write_test(img_request)); 1432 + if (write_request) 1662 1433 snapc = img_request->snapc; 1663 - else 1664 - snap_id = img_request->snap_id; 1665 1434 } 1666 1435 1667 1436 /* Allocate and initialize the request, for the single op */ ··· 1692 1419 if (!osd_req) 1693 1420 return NULL; /* ENOMEM */ 1694 1421 1695 - 
rbd_assert(obj_request_type_valid(obj_request->type)); 1696 - switch (obj_request->type) { 1697 - case OBJ_REQUEST_NODATA: 1698 - break; /* Nothing to do */ 1699 - case OBJ_REQUEST_BIO: 1700 - rbd_assert(obj_request->bio_list != NULL); 1701 - osd_req->r_bio = obj_request->bio_list; 1702 - break; 1703 - case OBJ_REQUEST_PAGES: 1704 - osd_req->r_pages = obj_request->pages; 1705 - osd_req->r_num_pages = obj_request->page_count; 1706 - osd_req->r_page_alignment = offset & ~PAGE_MASK; 1707 - break; 1708 - } 1709 - 1710 - if (write_request) { 1422 + if (write_request) 1711 1423 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; 1712 - now = CURRENT_TIME; 1713 - mtime = &now; 1714 - } else { 1424 + else 1715 1425 osd_req->r_flags = CEPH_OSD_FLAG_READ; 1716 - mtime = NULL; /* not needed for reads */ 1717 - offset = 0; /* These are not used... */ 1718 - length = 0; /* ...for osd read requests */ 1719 - } 1720 1426 1721 1427 osd_req->r_callback = rbd_osd_req_callback; 1722 1428 osd_req->r_priv = obj_request; ··· 1706 1454 1707 1455 osd_req->r_file_layout = rbd_dev->layout; /* struct */ 1708 1456 1709 - /* osd_req will get its own reference to snapc (if non-null) */ 1457 + return osd_req; 1458 + } 1710 1459 1711 - ceph_osdc_build_request(osd_req, offset, length, 1, op, 1712 - snapc, snap_id, mtime); 1460 + /* 1461 + * Create a copyup osd request based on the information in the 1462 + * object request supplied. A copyup request has two osd ops, 1463 + * a copyup method call, and a "normal" write request. 
1464 + */ 1465 + static struct ceph_osd_request * 1466 + rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) 1467 + { 1468 + struct rbd_img_request *img_request; 1469 + struct ceph_snap_context *snapc; 1470 + struct rbd_device *rbd_dev; 1471 + struct ceph_osd_client *osdc; 1472 + struct ceph_osd_request *osd_req; 1473 + 1474 + rbd_assert(obj_request_img_data_test(obj_request)); 1475 + img_request = obj_request->img_request; 1476 + rbd_assert(img_request); 1477 + rbd_assert(img_request_write_test(img_request)); 1478 + 1479 + /* Allocate and initialize the request, for the two ops */ 1480 + 1481 + snapc = img_request->snapc; 1482 + rbd_dev = img_request->rbd_dev; 1483 + osdc = &rbd_dev->rbd_client->client->osdc; 1484 + osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC); 1485 + if (!osd_req) 1486 + return NULL; /* ENOMEM */ 1487 + 1488 + osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; 1489 + osd_req->r_callback = rbd_osd_req_callback; 1490 + osd_req->r_priv = obj_request; 1491 + 1492 + osd_req->r_oid_len = strlen(obj_request->object_name); 1493 + rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid)); 1494 + memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len); 1495 + 1496 + osd_req->r_file_layout = rbd_dev->layout; /* struct */ 1713 1497 1714 1498 return osd_req; 1715 1499 } 1500 + 1716 1501 1717 1502 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req) 1718 1503 { ··· 1769 1480 rbd_assert(obj_request_type_valid(type)); 1770 1481 1771 1482 size = strlen(object_name) + 1; 1772 - obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL); 1773 - if (!obj_request) 1483 + name = kmalloc(size, GFP_KERNEL); 1484 + if (!name) 1774 1485 return NULL; 1775 1486 1776 - name = (char *)(obj_request + 1); 1487 + obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL); 1488 + if (!obj_request) { 1489 + kfree(name); 1490 + return NULL; 1491 + } 1492 + 1777 1493 obj_request->object_name = 
memcpy(name, object_name, size); 1778 1494 obj_request->offset = offset; 1779 1495 obj_request->length = length; 1496 + obj_request->flags = 0; 1780 1497 obj_request->which = BAD_WHICH; 1781 1498 obj_request->type = type; 1782 1499 INIT_LIST_HEAD(&obj_request->links); 1783 - obj_request_done_init(obj_request); 1784 1500 init_completion(&obj_request->completion); 1785 1501 kref_init(&obj_request->kref); 1786 1502 ··· 1824 1530 break; 1825 1531 } 1826 1532 1827 - kfree(obj_request); 1533 + kfree(obj_request->object_name); 1534 + obj_request->object_name = NULL; 1535 + kmem_cache_free(rbd_obj_request_cache, obj_request); 1828 1536 } 1829 1537 1830 1538 /* ··· 1837 1541 static struct rbd_img_request *rbd_img_request_create( 1838 1542 struct rbd_device *rbd_dev, 1839 1543 u64 offset, u64 length, 1840 - bool write_request) 1544 + bool write_request, 1545 + bool child_request) 1841 1546 { 1842 1547 struct rbd_img_request *img_request; 1843 - struct ceph_snap_context *snapc = NULL; 1844 1548 1845 - img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC); 1549 + img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC); 1846 1550 if (!img_request) 1847 1551 return NULL; 1848 1552 1849 1553 if (write_request) { 1850 1554 down_read(&rbd_dev->header_rwsem); 1851 - snapc = ceph_get_snap_context(rbd_dev->header.snapc); 1555 + ceph_get_snap_context(rbd_dev->header.snapc); 1852 1556 up_read(&rbd_dev->header_rwsem); 1853 - if (WARN_ON(!snapc)) { 1854 - kfree(img_request); 1855 - return NULL; /* Shouldn't happen */ 1856 - } 1857 1557 } 1858 1558 1859 1559 img_request->rq = NULL; 1860 1560 img_request->rbd_dev = rbd_dev; 1861 1561 img_request->offset = offset; 1862 1562 img_request->length = length; 1863 - img_request->write_request = write_request; 1864 - if (write_request) 1865 - img_request->snapc = snapc; 1866 - else 1563 + img_request->flags = 0; 1564 + if (write_request) { 1565 + img_request_write_set(img_request); 1566 + img_request->snapc = rbd_dev->header.snapc; 
1567 + } else { 1867 1568 img_request->snap_id = rbd_dev->spec->snap_id; 1569 + } 1570 + if (child_request) 1571 + img_request_child_set(img_request); 1572 + if (rbd_dev->parent_spec) 1573 + img_request_layered_set(img_request); 1868 1574 spin_lock_init(&img_request->completion_lock); 1869 1575 img_request->next_completion = 0; 1870 1576 img_request->callback = NULL; 1577 + img_request->result = 0; 1871 1578 img_request->obj_request_count = 0; 1872 1579 INIT_LIST_HEAD(&img_request->obj_requests); 1873 1580 kref_init(&img_request->kref); ··· 1899 1600 rbd_img_obj_request_del(img_request, obj_request); 1900 1601 rbd_assert(img_request->obj_request_count == 0); 1901 1602 1902 - if (img_request->write_request) 1603 + if (img_request_write_test(img_request)) 1903 1604 ceph_put_snap_context(img_request->snapc); 1904 1605 1905 - kfree(img_request); 1606 + if (img_request_child_test(img_request)) 1607 + rbd_obj_request_put(img_request->obj_request); 1608 + 1609 + kmem_cache_free(rbd_img_request_cache, img_request); 1906 1610 } 1907 1611 1908 - static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, 1909 - struct bio *bio_list) 1612 + static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) 1613 + { 1614 + struct rbd_img_request *img_request; 1615 + unsigned int xferred; 1616 + int result; 1617 + bool more; 1618 + 1619 + rbd_assert(obj_request_img_data_test(obj_request)); 1620 + img_request = obj_request->img_request; 1621 + 1622 + rbd_assert(obj_request->xferred <= (u64)UINT_MAX); 1623 + xferred = (unsigned int)obj_request->xferred; 1624 + result = obj_request->result; 1625 + if (result) { 1626 + struct rbd_device *rbd_dev = img_request->rbd_dev; 1627 + 1628 + rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n", 1629 + img_request_write_test(img_request) ? 
"write" : "read", 1630 + obj_request->length, obj_request->img_offset, 1631 + obj_request->offset); 1632 + rbd_warn(rbd_dev, " result %d xferred %x\n", 1633 + result, xferred); 1634 + if (!img_request->result) 1635 + img_request->result = result; 1636 + } 1637 + 1638 + /* Image object requests don't own their page array */ 1639 + 1640 + if (obj_request->type == OBJ_REQUEST_PAGES) { 1641 + obj_request->pages = NULL; 1642 + obj_request->page_count = 0; 1643 + } 1644 + 1645 + if (img_request_child_test(img_request)) { 1646 + rbd_assert(img_request->obj_request != NULL); 1647 + more = obj_request->which < img_request->obj_request_count - 1; 1648 + } else { 1649 + rbd_assert(img_request->rq != NULL); 1650 + more = blk_end_request(img_request->rq, result, xferred); 1651 + } 1652 + 1653 + return more; 1654 + } 1655 + 1656 + static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) 1657 + { 1658 + struct rbd_img_request *img_request; 1659 + u32 which = obj_request->which; 1660 + bool more = true; 1661 + 1662 + rbd_assert(obj_request_img_data_test(obj_request)); 1663 + img_request = obj_request->img_request; 1664 + 1665 + dout("%s: img %p obj %p\n", __func__, img_request, obj_request); 1666 + rbd_assert(img_request != NULL); 1667 + rbd_assert(img_request->obj_request_count > 0); 1668 + rbd_assert(which != BAD_WHICH); 1669 + rbd_assert(which < img_request->obj_request_count); 1670 + rbd_assert(which >= img_request->next_completion); 1671 + 1672 + spin_lock_irq(&img_request->completion_lock); 1673 + if (which != img_request->next_completion) 1674 + goto out; 1675 + 1676 + for_each_obj_request_from(img_request, obj_request) { 1677 + rbd_assert(more); 1678 + rbd_assert(which < img_request->obj_request_count); 1679 + 1680 + if (!obj_request_done_test(obj_request)) 1681 + break; 1682 + more = rbd_img_obj_end_request(obj_request); 1683 + which++; 1684 + } 1685 + 1686 + rbd_assert(more ^ (which == img_request->obj_request_count)); 1687 + img_request->next_completion = 
which; 1688 + out: 1689 + spin_unlock_irq(&img_request->completion_lock); 1690 + 1691 + if (!more) 1692 + rbd_img_request_complete(img_request); 1693 + } 1694 + 1695 + /* 1696 + * Split up an image request into one or more object requests, each 1697 + * to a different object. The "type" parameter indicates whether 1698 + * "data_desc" is the pointer to the head of a list of bio 1699 + * structures, or the base of a page array. In either case this 1700 + * function assumes data_desc describes memory sufficient to hold 1701 + * all data described by the image request. 1702 + */ 1703 + static int rbd_img_request_fill(struct rbd_img_request *img_request, 1704 + enum obj_request_type type, 1705 + void *data_desc) 1910 1706 { 1911 1707 struct rbd_device *rbd_dev = img_request->rbd_dev; 1912 1708 struct rbd_obj_request *obj_request = NULL; 1913 1709 struct rbd_obj_request *next_obj_request; 1914 - unsigned int bio_offset; 1915 - u64 image_offset; 1710 + bool write_request = img_request_write_test(img_request); 1711 + struct bio *bio_list; 1712 + unsigned int bio_offset = 0; 1713 + struct page **pages; 1714 + u64 img_offset; 1916 1715 u64 resid; 1917 1716 u16 opcode; 1918 1717 1919 - dout("%s: img %p bio %p\n", __func__, img_request, bio_list); 1718 + dout("%s: img %p type %d data_desc %p\n", __func__, img_request, 1719 + (int)type, data_desc); 1920 1720 1921 - opcode = img_request->write_request ? CEPH_OSD_OP_WRITE 1922 - : CEPH_OSD_OP_READ; 1923 - bio_offset = 0; 1924 - image_offset = img_request->offset; 1925 - rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT); 1721 + opcode = write_request ? 
CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ; 1722 + img_offset = img_request->offset; 1926 1723 resid = img_request->length; 1927 1724 rbd_assert(resid > 0); 1725 + 1726 + if (type == OBJ_REQUEST_BIO) { 1727 + bio_list = data_desc; 1728 + rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); 1729 + } else { 1730 + rbd_assert(type == OBJ_REQUEST_PAGES); 1731 + pages = data_desc; 1732 + } 1733 + 1928 1734 while (resid) { 1735 + struct ceph_osd_request *osd_req; 1929 1736 const char *object_name; 1930 - unsigned int clone_size; 1931 - struct ceph_osd_req_op *op; 1932 1737 u64 offset; 1933 1738 u64 length; 1934 1739 1935 - object_name = rbd_segment_name(rbd_dev, image_offset); 1740 + object_name = rbd_segment_name(rbd_dev, img_offset); 1936 1741 if (!object_name) 1937 1742 goto out_unwind; 1938 - offset = rbd_segment_offset(rbd_dev, image_offset); 1939 - length = rbd_segment_length(rbd_dev, image_offset, resid); 1743 + offset = rbd_segment_offset(rbd_dev, img_offset); 1744 + length = rbd_segment_length(rbd_dev, img_offset, resid); 1940 1745 obj_request = rbd_obj_request_create(object_name, 1941 - offset, length, 1942 - OBJ_REQUEST_BIO); 1943 - kfree(object_name); /* object request has its own copy */ 1746 + offset, length, type); 1747 + /* object request has its own copy of the object name */ 1748 + rbd_segment_name_free(object_name); 1944 1749 if (!obj_request) 1945 1750 goto out_unwind; 1946 1751 1947 - rbd_assert(length <= (u64) UINT_MAX); 1948 - clone_size = (unsigned int) length; 1949 - obj_request->bio_list = bio_chain_clone_range(&bio_list, 1950 - &bio_offset, clone_size, 1951 - GFP_ATOMIC); 1952 - if (!obj_request->bio_list) 1953 - goto out_partial; 1752 + if (type == OBJ_REQUEST_BIO) { 1753 + unsigned int clone_size; 1954 1754 1955 - /* 1956 - * Build up the op to use in building the osd 1957 - * request. Note that the contents of the op are 1958 - * copied by rbd_osd_req_create(). 
1959 - */ 1960 - op = rbd_osd_req_op_create(opcode, offset, length); 1961 - if (!op) 1962 - goto out_partial; 1963 - obj_request->osd_req = rbd_osd_req_create(rbd_dev, 1964 - img_request->write_request, 1965 - obj_request, op); 1966 - rbd_osd_req_op_destroy(op); 1967 - if (!obj_request->osd_req) 1968 - goto out_partial; 1969 - /* status and version are initially zero-filled */ 1755 + rbd_assert(length <= (u64)UINT_MAX); 1756 + clone_size = (unsigned int)length; 1757 + obj_request->bio_list = 1758 + bio_chain_clone_range(&bio_list, 1759 + &bio_offset, 1760 + clone_size, 1761 + GFP_ATOMIC); 1762 + if (!obj_request->bio_list) 1763 + goto out_partial; 1764 + } else { 1765 + unsigned int page_count; 1970 1766 1767 + obj_request->pages = pages; 1768 + page_count = (u32)calc_pages_for(offset, length); 1769 + obj_request->page_count = page_count; 1770 + if ((offset + length) & ~PAGE_MASK) 1771 + page_count--; /* more on last page */ 1772 + pages += page_count; 1773 + } 1774 + 1775 + osd_req = rbd_osd_req_create(rbd_dev, write_request, 1776 + obj_request); 1777 + if (!osd_req) 1778 + goto out_partial; 1779 + obj_request->osd_req = osd_req; 1780 + obj_request->callback = rbd_img_obj_callback; 1781 + 1782 + osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 1783 + 0, 0); 1784 + if (type == OBJ_REQUEST_BIO) 1785 + osd_req_op_extent_osd_data_bio(osd_req, 0, 1786 + obj_request->bio_list, length); 1787 + else 1788 + osd_req_op_extent_osd_data_pages(osd_req, 0, 1789 + obj_request->pages, length, 1790 + offset & ~PAGE_MASK, false, false); 1791 + 1792 + if (write_request) 1793 + rbd_osd_req_format_write(obj_request); 1794 + else 1795 + rbd_osd_req_format_read(obj_request); 1796 + 1797 + obj_request->img_offset = img_offset; 1971 1798 rbd_img_obj_request_add(img_request, obj_request); 1972 1799 1973 - image_offset += length; 1800 + img_offset += length; 1974 1801 resid -= length; 1975 1802 } 1976 1803 ··· 2111 1686 return -ENOMEM; 2112 1687 } 2113 1688 2114 - static void 
rbd_img_obj_callback(struct rbd_obj_request *obj_request) 1689 + static void 1690 + rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) 2115 1691 { 2116 1692 struct rbd_img_request *img_request; 2117 - u32 which = obj_request->which; 2118 - bool more = true; 1693 + struct rbd_device *rbd_dev; 1694 + u64 length; 1695 + u32 page_count; 1696 + 1697 + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 1698 + rbd_assert(obj_request_img_data_test(obj_request)); 1699 + img_request = obj_request->img_request; 1700 + rbd_assert(img_request); 1701 + 1702 + rbd_dev = img_request->rbd_dev; 1703 + rbd_assert(rbd_dev); 1704 + length = (u64)1 << rbd_dev->header.obj_order; 1705 + page_count = (u32)calc_pages_for(0, length); 1706 + 1707 + rbd_assert(obj_request->copyup_pages); 1708 + ceph_release_page_vector(obj_request->copyup_pages, page_count); 1709 + obj_request->copyup_pages = NULL; 1710 + 1711 + /* 1712 + * We want the transfer count to reflect the size of the 1713 + * original write request. There is no such thing as a 1714 + * successful short write, so if the request was successful 1715 + * we can just set it to the originally-requested length. 
1716 + */ 1717 + if (!obj_request->result) 1718 + obj_request->xferred = obj_request->length; 1719 + 1720 + /* Finish up with the normal image object callback */ 1721 + 1722 + rbd_img_obj_callback(obj_request); 1723 + } 1724 + 1725 + static void 1726 + rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) 1727 + { 1728 + struct rbd_obj_request *orig_request; 1729 + struct ceph_osd_request *osd_req; 1730 + struct ceph_osd_client *osdc; 1731 + struct rbd_device *rbd_dev; 1732 + struct page **pages; 1733 + int result; 1734 + u64 obj_size; 1735 + u64 xferred; 1736 + 1737 + rbd_assert(img_request_child_test(img_request)); 1738 + 1739 + /* First get what we need from the image request */ 1740 + 1741 + pages = img_request->copyup_pages; 1742 + rbd_assert(pages != NULL); 1743 + img_request->copyup_pages = NULL; 1744 + 1745 + orig_request = img_request->obj_request; 1746 + rbd_assert(orig_request != NULL); 1747 + rbd_assert(orig_request->type == OBJ_REQUEST_BIO); 1748 + result = img_request->result; 1749 + obj_size = img_request->length; 1750 + xferred = img_request->xferred; 1751 + 1752 + rbd_dev = img_request->rbd_dev; 1753 + rbd_assert(rbd_dev); 1754 + rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order); 1755 + 1756 + rbd_img_request_put(img_request); 1757 + 1758 + if (result) 1759 + goto out_err; 1760 + 1761 + /* Allocate the new copyup osd request for the original request */ 1762 + 1763 + result = -ENOMEM; 1764 + rbd_assert(!orig_request->osd_req); 1765 + osd_req = rbd_osd_req_create_copyup(orig_request); 1766 + if (!osd_req) 1767 + goto out_err; 1768 + orig_request->osd_req = osd_req; 1769 + orig_request->copyup_pages = pages; 1770 + 1771 + /* Initialize the copyup op */ 1772 + 1773 + osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); 1774 + osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, 1775 + false, false); 1776 + 1777 + /* Then the original write request op */ 1778 + 1779 + 
osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, 1780 + orig_request->offset, 1781 + orig_request->length, 0, 0); 1782 + osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, 1783 + orig_request->length); 1784 + 1785 + rbd_osd_req_format_write(orig_request); 1786 + 1787 + /* All set, send it off. */ 1788 + 1789 + orig_request->callback = rbd_img_obj_copyup_callback; 1790 + osdc = &rbd_dev->rbd_client->client->osdc; 1791 + result = rbd_obj_request_submit(osdc, orig_request); 1792 + if (!result) 1793 + return; 1794 + out_err: 1795 + /* Record the error code and complete the request */ 1796 + 1797 + orig_request->result = result; 1798 + orig_request->xferred = 0; 1799 + obj_request_done_set(orig_request); 1800 + rbd_obj_request_complete(orig_request); 1801 + } 1802 + 1803 + /* 1804 + * Read from the parent image the range of data that covers the 1805 + * entire target of the given object request. This is used for 1806 + * satisfying a layered image write request when the target of an 1807 + * object request from the image request does not exist. 1808 + * 1809 + * A page array big enough to hold the returned data is allocated 1810 + * and supplied to rbd_img_request_fill() as the "data descriptor." 1811 + * When the read completes, this page array will be transferred to 1812 + * the original object request for the copyup operation. 1813 + * 1814 + * If an error occurs, record it as the result of the original 1815 + * object request and mark it done so it gets completed. 
1816 + */ 1817 + static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) 1818 + { 1819 + struct rbd_img_request *img_request = NULL; 1820 + struct rbd_img_request *parent_request = NULL; 1821 + struct rbd_device *rbd_dev; 1822 + u64 img_offset; 1823 + u64 length; 1824 + struct page **pages = NULL; 1825 + u32 page_count; 1826 + int result; 1827 + 1828 + rbd_assert(obj_request_img_data_test(obj_request)); 1829 + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2119 1830 2120 1831 img_request = obj_request->img_request; 2121 - 2122 - dout("%s: img %p obj %p\n", __func__, img_request, obj_request); 2123 1832 rbd_assert(img_request != NULL); 2124 - rbd_assert(img_request->rq != NULL); 2125 - rbd_assert(img_request->obj_request_count > 0); 2126 - rbd_assert(which != BAD_WHICH); 2127 - rbd_assert(which < img_request->obj_request_count); 2128 - rbd_assert(which >= img_request->next_completion); 1833 + rbd_dev = img_request->rbd_dev; 1834 + rbd_assert(rbd_dev->parent != NULL); 2129 1835 2130 - spin_lock_irq(&img_request->completion_lock); 2131 - if (which != img_request->next_completion) 2132 - goto out; 1836 + /* 1837 + * First things first. The original osd request is of no 1838 + * use to use any more, we'll need a new one that can hold 1839 + * the two ops in a copyup request. We'll get that later, 1840 + * but for now we can release the old one. 1841 + */ 1842 + rbd_osd_req_destroy(obj_request->osd_req); 1843 + obj_request->osd_req = NULL; 2133 1844 2134 - for_each_obj_request_from(img_request, obj_request) { 2135 - unsigned int xferred; 2136 - int result; 1845 + /* 1846 + * Determine the byte range covered by the object in the 1847 + * child image to which the original request was to be sent. 
1848 + */ 1849 + img_offset = obj_request->img_offset - obj_request->offset; 1850 + length = (u64)1 << rbd_dev->header.obj_order; 2137 1851 2138 - rbd_assert(more); 2139 - rbd_assert(which < img_request->obj_request_count); 2140 - 2141 - if (!obj_request_done_test(obj_request)) 2142 - break; 2143 - 2144 - rbd_assert(obj_request->xferred <= (u64) UINT_MAX); 2145 - xferred = (unsigned int) obj_request->xferred; 2146 - result = (int) obj_request->result; 2147 - if (result) 2148 - rbd_warn(NULL, "obj_request %s result %d xferred %u\n", 2149 - img_request->write_request ? "write" : "read", 2150 - result, xferred); 2151 - 2152 - more = blk_end_request(img_request->rq, result, xferred); 2153 - which++; 1852 + /* 1853 + * There is no defined parent data beyond the parent 1854 + * overlap, so limit what we read at that boundary if 1855 + * necessary. 1856 + */ 1857 + if (img_offset + length > rbd_dev->parent_overlap) { 1858 + rbd_assert(img_offset < rbd_dev->parent_overlap); 1859 + length = rbd_dev->parent_overlap - img_offset; 2154 1860 } 2155 1861 2156 - rbd_assert(more ^ (which == img_request->obj_request_count)); 2157 - img_request->next_completion = which; 2158 - out: 2159 - spin_unlock_irq(&img_request->completion_lock); 1862 + /* 1863 + * Allocate a page array big enough to receive the data read 1864 + * from the parent. 
1865 + */ 1866 + page_count = (u32)calc_pages_for(0, length); 1867 + pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); 1868 + if (IS_ERR(pages)) { 1869 + result = PTR_ERR(pages); 1870 + pages = NULL; 1871 + goto out_err; 1872 + } 2160 1873 2161 - if (!more) 2162 - rbd_img_request_complete(img_request); 1874 + result = -ENOMEM; 1875 + parent_request = rbd_img_request_create(rbd_dev->parent, 1876 + img_offset, length, 1877 + false, true); 1878 + if (!parent_request) 1879 + goto out_err; 1880 + rbd_obj_request_get(obj_request); 1881 + parent_request->obj_request = obj_request; 1882 + 1883 + result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); 1884 + if (result) 1885 + goto out_err; 1886 + parent_request->copyup_pages = pages; 1887 + 1888 + parent_request->callback = rbd_img_obj_parent_read_full_callback; 1889 + result = rbd_img_request_submit(parent_request); 1890 + if (!result) 1891 + return 0; 1892 + 1893 + parent_request->copyup_pages = NULL; 1894 + parent_request->obj_request = NULL; 1895 + rbd_obj_request_put(obj_request); 1896 + out_err: 1897 + if (pages) 1898 + ceph_release_page_vector(pages, page_count); 1899 + if (parent_request) 1900 + rbd_img_request_put(parent_request); 1901 + obj_request->result = result; 1902 + obj_request->xferred = 0; 1903 + obj_request_done_set(obj_request); 1904 + 1905 + return result; 1906 + } 1907 + 1908 + static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) 1909 + { 1910 + struct rbd_obj_request *orig_request; 1911 + int result; 1912 + 1913 + rbd_assert(!obj_request_img_data_test(obj_request)); 1914 + 1915 + /* 1916 + * All we need from the object request is the original 1917 + * request and the result of the STAT op. Grab those, then 1918 + * we're done with the request. 
1919 + */ 1920 + orig_request = obj_request->obj_request; 1921 + obj_request->obj_request = NULL; 1922 + rbd_assert(orig_request); 1923 + rbd_assert(orig_request->img_request); 1924 + 1925 + result = obj_request->result; 1926 + obj_request->result = 0; 1927 + 1928 + dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__, 1929 + obj_request, orig_request, result, 1930 + obj_request->xferred, obj_request->length); 1931 + rbd_obj_request_put(obj_request); 1932 + 1933 + rbd_assert(orig_request); 1934 + rbd_assert(orig_request->img_request); 1935 + 1936 + /* 1937 + * Our only purpose here is to determine whether the object 1938 + * exists, and we don't want to treat the non-existence as 1939 + * an error. If something else comes back, transfer the 1940 + * error to the original request and complete it now. 1941 + */ 1942 + if (!result) { 1943 + obj_request_existence_set(orig_request, true); 1944 + } else if (result == -ENOENT) { 1945 + obj_request_existence_set(orig_request, false); 1946 + } else if (result) { 1947 + orig_request->result = result; 1948 + goto out; 1949 + } 1950 + 1951 + /* 1952 + * Resubmit the original request now that we have recorded 1953 + * whether the target object exists. 
1954 + */ 1955 + orig_request->result = rbd_img_obj_request_submit(orig_request); 1956 + out: 1957 + if (orig_request->result) 1958 + rbd_obj_request_complete(orig_request); 1959 + rbd_obj_request_put(orig_request); 1960 + } 1961 + 1962 + static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) 1963 + { 1964 + struct rbd_obj_request *stat_request; 1965 + struct rbd_device *rbd_dev; 1966 + struct ceph_osd_client *osdc; 1967 + struct page **pages = NULL; 1968 + u32 page_count; 1969 + size_t size; 1970 + int ret; 1971 + 1972 + /* 1973 + * The response data for a STAT call consists of: 1974 + * le64 length; 1975 + * struct { 1976 + * le32 tv_sec; 1977 + * le32 tv_nsec; 1978 + * } mtime; 1979 + */ 1980 + size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); 1981 + page_count = (u32)calc_pages_for(0, size); 1982 + pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); 1983 + if (IS_ERR(pages)) 1984 + return PTR_ERR(pages); 1985 + 1986 + ret = -ENOMEM; 1987 + stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0, 1988 + OBJ_REQUEST_PAGES); 1989 + if (!stat_request) 1990 + goto out; 1991 + 1992 + rbd_obj_request_get(obj_request); 1993 + stat_request->obj_request = obj_request; 1994 + stat_request->pages = pages; 1995 + stat_request->page_count = page_count; 1996 + 1997 + rbd_assert(obj_request->img_request); 1998 + rbd_dev = obj_request->img_request->rbd_dev; 1999 + stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 2000 + stat_request); 2001 + if (!stat_request->osd_req) 2002 + goto out; 2003 + stat_request->callback = rbd_img_obj_exists_callback; 2004 + 2005 + osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT); 2006 + osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0, 2007 + false, false); 2008 + rbd_osd_req_format_read(stat_request); 2009 + 2010 + osdc = &rbd_dev->rbd_client->client->osdc; 2011 + ret = rbd_obj_request_submit(osdc, stat_request); 2012 + out: 2013 + if (ret) 2014 + 
rbd_obj_request_put(obj_request); 2015 + 2016 + return ret; 2017 + } 2018 + 2019 + static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) 2020 + { 2021 + struct rbd_img_request *img_request; 2022 + struct rbd_device *rbd_dev; 2023 + bool known; 2024 + 2025 + rbd_assert(obj_request_img_data_test(obj_request)); 2026 + 2027 + img_request = obj_request->img_request; 2028 + rbd_assert(img_request); 2029 + rbd_dev = img_request->rbd_dev; 2030 + 2031 + /* 2032 + * Only writes to layered images need special handling. 2033 + * Reads and non-layered writes are simple object requests. 2034 + * Layered writes that start beyond the end of the overlap 2035 + * with the parent have no parent data, so they too are 2036 + * simple object requests. Finally, if the target object is 2037 + * known to already exist, its parent data has already been 2038 + * copied, so a write to the object can also be handled as a 2039 + * simple object request. 2040 + */ 2041 + if (!img_request_write_test(img_request) || 2042 + !img_request_layered_test(img_request) || 2043 + rbd_dev->parent_overlap <= obj_request->img_offset || 2044 + ((known = obj_request_known_test(obj_request)) && 2045 + obj_request_exists_test(obj_request))) { 2046 + 2047 + struct rbd_device *rbd_dev; 2048 + struct ceph_osd_client *osdc; 2049 + 2050 + rbd_dev = obj_request->img_request->rbd_dev; 2051 + osdc = &rbd_dev->rbd_client->client->osdc; 2052 + 2053 + return rbd_obj_request_submit(osdc, obj_request); 2054 + } 2055 + 2056 + /* 2057 + * It's a layered write. The target object might exist but 2058 + * we may not know that yet. If we know it doesn't exist, 2059 + * start by reading the data for the full target object from 2060 + * the parent so we can use it for a copyup to the target. 2061 + */ 2062 + if (known) 2063 + return rbd_img_obj_parent_read_full(obj_request); 2064 + 2065 + /* We don't know whether the target exists. Go find out. 
*/ 2066 + 2067 + return rbd_img_obj_exists_submit(obj_request); 2163 2068 } 2164 2069 2165 2070 static int rbd_img_request_submit(struct rbd_img_request *img_request) 2166 2071 { 2167 - struct rbd_device *rbd_dev = img_request->rbd_dev; 2168 - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2169 2072 struct rbd_obj_request *obj_request; 2170 2073 struct rbd_obj_request *next_obj_request; 2171 2074 ··· 2501 1748 for_each_obj_request_safe(img_request, obj_request, next_obj_request) { 2502 1749 int ret; 2503 1750 2504 - obj_request->callback = rbd_img_obj_callback; 2505 - ret = rbd_obj_request_submit(osdc, obj_request); 1751 + ret = rbd_img_obj_request_submit(obj_request); 2506 1752 if (ret) 2507 1753 return ret; 2508 - /* 2509 - * The image request has its own reference to each 2510 - * of its object requests, so we can safely drop the 2511 - * initial one here. 2512 - */ 2513 - rbd_obj_request_put(obj_request); 2514 1754 } 2515 1755 2516 1756 return 0; 2517 1757 } 2518 1758 2519 - static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, 2520 - u64 ver, u64 notify_id) 1759 + static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) 2521 1760 { 2522 1761 struct rbd_obj_request *obj_request; 2523 - struct ceph_osd_req_op *op; 2524 - struct ceph_osd_client *osdc; 1762 + struct rbd_device *rbd_dev; 1763 + u64 obj_end; 1764 + 1765 + rbd_assert(img_request_child_test(img_request)); 1766 + 1767 + obj_request = img_request->obj_request; 1768 + rbd_assert(obj_request); 1769 + rbd_assert(obj_request->img_request); 1770 + 1771 + obj_request->result = img_request->result; 1772 + if (obj_request->result) 1773 + goto out; 1774 + 1775 + /* 1776 + * We need to zero anything beyond the parent overlap 1777 + * boundary. Since rbd_img_obj_request_read_callback() 1778 + * will zero anything beyond the end of a short read, an 1779 + * easy way to do this is to pretend the data from the 1780 + * parent came up short--ending at the overlap boundary. 
1781 + */ 1782 + rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); 1783 + obj_end = obj_request->img_offset + obj_request->length; 1784 + rbd_dev = obj_request->img_request->rbd_dev; 1785 + if (obj_end > rbd_dev->parent_overlap) { 1786 + u64 xferred = 0; 1787 + 1788 + if (obj_request->img_offset < rbd_dev->parent_overlap) 1789 + xferred = rbd_dev->parent_overlap - 1790 + obj_request->img_offset; 1791 + 1792 + obj_request->xferred = min(img_request->xferred, xferred); 1793 + } else { 1794 + obj_request->xferred = img_request->xferred; 1795 + } 1796 + out: 1797 + rbd_img_request_put(img_request); 1798 + rbd_img_obj_request_read_callback(obj_request); 1799 + rbd_obj_request_complete(obj_request); 1800 + } 1801 + 1802 + static void rbd_img_parent_read(struct rbd_obj_request *obj_request) 1803 + { 1804 + struct rbd_device *rbd_dev; 1805 + struct rbd_img_request *img_request; 1806 + int result; 1807 + 1808 + rbd_assert(obj_request_img_data_test(obj_request)); 1809 + rbd_assert(obj_request->img_request != NULL); 1810 + rbd_assert(obj_request->result == (s32) -ENOENT); 1811 + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 1812 + 1813 + rbd_dev = obj_request->img_request->rbd_dev; 1814 + rbd_assert(rbd_dev->parent != NULL); 1815 + /* rbd_read_finish(obj_request, obj_request->length); */ 1816 + img_request = rbd_img_request_create(rbd_dev->parent, 1817 + obj_request->img_offset, 1818 + obj_request->length, 1819 + false, true); 1820 + result = -ENOMEM; 1821 + if (!img_request) 1822 + goto out_err; 1823 + 1824 + rbd_obj_request_get(obj_request); 1825 + img_request->obj_request = obj_request; 1826 + 1827 + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 1828 + obj_request->bio_list); 1829 + if (result) 1830 + goto out_err; 1831 + 1832 + img_request->callback = rbd_img_parent_read_callback; 1833 + result = rbd_img_request_submit(img_request); 1834 + if (result) 1835 + goto out_err; 1836 + 1837 + return; 1838 + out_err: 1839 + if (img_request) 
1840 + rbd_img_request_put(img_request); 1841 + obj_request->result = result; 1842 + obj_request->xferred = 0; 1843 + obj_request_done_set(obj_request); 1844 + } 1845 + 1846 + static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) 1847 + { 1848 + struct rbd_obj_request *obj_request; 1849 + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2525 1850 int ret; 2526 1851 2527 1852 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, ··· 2608 1777 return -ENOMEM; 2609 1778 2610 1779 ret = -ENOMEM; 2611 - op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver); 2612 - if (!op) 2613 - goto out; 2614 - obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 2615 - obj_request, op); 2616 - rbd_osd_req_op_destroy(op); 1780 + obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); 2617 1781 if (!obj_request->osd_req) 2618 1782 goto out; 2619 - 2620 - osdc = &rbd_dev->rbd_client->client->osdc; 2621 1783 obj_request->callback = rbd_obj_request_put; 1784 + 1785 + osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, 1786 + notify_id, 0, 0); 1787 + rbd_osd_req_format_read(obj_request); 1788 + 2622 1789 ret = rbd_obj_request_submit(osdc, obj_request); 2623 1790 out: 2624 1791 if (ret) ··· 2628 1799 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 2629 1800 { 2630 1801 struct rbd_device *rbd_dev = (struct rbd_device *)data; 2631 - u64 hver; 2632 - int rc; 2633 1802 2634 1803 if (!rbd_dev) 2635 1804 return; 2636 1805 2637 1806 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 2638 - rbd_dev->header_name, (unsigned long long) notify_id, 2639 - (unsigned int) opcode); 2640 - rc = rbd_dev_refresh(rbd_dev, &hver); 2641 - if (rc) 2642 - rbd_warn(rbd_dev, "got notification but failed to " 2643 - " update snaps: %d\n", rc); 1807 + rbd_dev->header_name, (unsigned long long)notify_id, 1808 + (unsigned int)opcode); 1809 + (void)rbd_dev_refresh(rbd_dev); 2644 1810 2645 - 
rbd_obj_notify_ack(rbd_dev, hver, notify_id); 1811 + rbd_obj_notify_ack(rbd_dev, notify_id); 2646 1812 } 2647 1813 2648 1814 /* ··· 2648 1824 { 2649 1825 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2650 1826 struct rbd_obj_request *obj_request; 2651 - struct ceph_osd_req_op *op; 2652 1827 int ret; 2653 1828 2654 1829 rbd_assert(start ^ !!rbd_dev->watch_event); ··· 2667 1844 if (!obj_request) 2668 1845 goto out_cancel; 2669 1846 2670 - op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH, 2671 - rbd_dev->watch_event->cookie, 2672 - rbd_dev->header.obj_version, start); 2673 - if (!op) 2674 - goto out_cancel; 2675 - obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 2676 - obj_request, op); 2677 - rbd_osd_req_op_destroy(op); 1847 + obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request); 2678 1848 if (!obj_request->osd_req) 2679 1849 goto out_cancel; 2680 1850 ··· 2676 1860 else 2677 1861 ceph_osdc_unregister_linger_request(osdc, 2678 1862 rbd_dev->watch_request->osd_req); 1863 + 1864 + osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, 1865 + rbd_dev->watch_event->cookie, 0, start); 1866 + rbd_osd_req_format_write(obj_request); 1867 + 2679 1868 ret = rbd_obj_request_submit(osdc, obj_request); 2680 1869 if (ret) 2681 1870 goto out_cancel; ··· 2720 1899 } 2721 1900 2722 1901 /* 2723 - * Synchronous osd object method call 1902 + * Synchronous osd object method call. Returns the number of bytes 1903 + * returned in the outbound buffer, or a negative error code. 
2724 1904 */ 2725 1905 static int rbd_obj_method_sync(struct rbd_device *rbd_dev, 2726 1906 const char *object_name, 2727 1907 const char *class_name, 2728 1908 const char *method_name, 2729 - const char *outbound, 1909 + const void *outbound, 2730 1910 size_t outbound_size, 2731 - char *inbound, 2732 - size_t inbound_size, 2733 - u64 *version) 1911 + void *inbound, 1912 + size_t inbound_size) 2734 1913 { 1914 + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2735 1915 struct rbd_obj_request *obj_request; 2736 - struct ceph_osd_client *osdc; 2737 - struct ceph_osd_req_op *op; 2738 1916 struct page **pages; 2739 1917 u32 page_count; 2740 1918 int ret; 2741 1919 2742 1920 /* 2743 - * Method calls are ultimately read operations but they 2744 - * don't involve object data (so no offset or length). 2745 - * The result should placed into the inbound buffer 2746 - * provided. They also supply outbound data--parameters for 2747 - * the object method. Currently if this is present it will 2748 - * be a snapshot id. 1921 + * Method calls are ultimately read operations. The result 1922 + * should placed into the inbound buffer provided. They 1923 + * also supply outbound data--parameters for the object 1924 + * method. Currently if this is present it will be a 1925 + * snapshot id. 
2749 1926 */ 2750 - page_count = (u32) calc_pages_for(0, inbound_size); 1927 + page_count = (u32)calc_pages_for(0, inbound_size); 2751 1928 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); 2752 1929 if (IS_ERR(pages)) 2753 1930 return PTR_ERR(pages); 2754 1931 2755 1932 ret = -ENOMEM; 2756 - obj_request = rbd_obj_request_create(object_name, 0, 0, 1933 + obj_request = rbd_obj_request_create(object_name, 0, inbound_size, 2757 1934 OBJ_REQUEST_PAGES); 2758 1935 if (!obj_request) 2759 1936 goto out; ··· 2759 1940 obj_request->pages = pages; 2760 1941 obj_request->page_count = page_count; 2761 1942 2762 - op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name, 2763 - method_name, outbound, outbound_size); 2764 - if (!op) 2765 - goto out; 2766 - obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 2767 - obj_request, op); 2768 - rbd_osd_req_op_destroy(op); 1943 + obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); 2769 1944 if (!obj_request->osd_req) 2770 1945 goto out; 2771 1946 2772 - osdc = &rbd_dev->rbd_client->client->osdc; 1947 + osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, 1948 + class_name, method_name); 1949 + if (outbound_size) { 1950 + struct ceph_pagelist *pagelist; 1951 + 1952 + pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); 1953 + if (!pagelist) 1954 + goto out; 1955 + 1956 + ceph_pagelist_init(pagelist); 1957 + ceph_pagelist_append(pagelist, outbound, outbound_size); 1958 + osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0, 1959 + pagelist); 1960 + } 1961 + osd_req_op_cls_response_data_pages(obj_request->osd_req, 0, 1962 + obj_request->pages, inbound_size, 1963 + 0, false, false); 1964 + rbd_osd_req_format_read(obj_request); 1965 + 2773 1966 ret = rbd_obj_request_submit(osdc, obj_request); 2774 1967 if (ret) 2775 1968 goto out; ··· 2792 1961 ret = obj_request->result; 2793 1962 if (ret < 0) 2794 1963 goto out; 2795 - ret = 0; 1964 + 1965 + rbd_assert(obj_request->xferred < (u64)INT_MAX); 1966 
+ ret = (int)obj_request->xferred; 2796 1967 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred); 2797 - if (version) 2798 - *version = obj_request->version; 2799 1968 out: 2800 1969 if (obj_request) 2801 1970 rbd_obj_request_put(obj_request); ··· 2865 2034 } 2866 2035 2867 2036 result = -EINVAL; 2868 - if (WARN_ON(offset && length > U64_MAX - offset + 1)) 2037 + if (offset && length > U64_MAX - offset + 1) { 2038 + rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n", 2039 + offset, length); 2869 2040 goto end_request; /* Shouldn't happen */ 2041 + } 2870 2042 2871 2043 result = -ENOMEM; 2872 2044 img_request = rbd_img_request_create(rbd_dev, offset, length, 2873 - write_request); 2045 + write_request, false); 2874 2046 if (!img_request) 2875 2047 goto end_request; 2876 2048 2877 2049 img_request->rq = rq; 2878 2050 2879 - result = rbd_img_request_fill_bio(img_request, rq->bio); 2051 + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 2052 + rq->bio); 2880 2053 if (!result) 2881 2054 result = rbd_img_request_submit(img_request); 2882 2055 if (result) ··· 2888 2053 end_request: 2889 2054 spin_lock_irq(q->queue_lock); 2890 2055 if (result < 0) { 2891 - rbd_warn(rbd_dev, "obj_request %s result %d\n", 2892 - write_request ? "write" : "read", result); 2056 + rbd_warn(rbd_dev, "%s %llx at %llx result %d\n", 2057 + write_request ? 
"write" : "read", 2058 + length, offset, result); 2059 + 2893 2060 __blk_end_request_all(rq, result); 2894 2061 } 2895 2062 } ··· 2950 2113 if (!disk) 2951 2114 return; 2952 2115 2953 - if (disk->flags & GENHD_FL_UP) 2116 + rbd_dev->disk = NULL; 2117 + if (disk->flags & GENHD_FL_UP) { 2954 2118 del_gendisk(disk); 2955 - if (disk->queue) 2956 - blk_cleanup_queue(disk->queue); 2119 + if (disk->queue) 2120 + blk_cleanup_queue(disk->queue); 2121 + } 2957 2122 put_disk(disk); 2958 2123 } 2959 2124 2960 2125 static int rbd_obj_read_sync(struct rbd_device *rbd_dev, 2961 2126 const char *object_name, 2962 - u64 offset, u64 length, 2963 - char *buf, u64 *version) 2127 + u64 offset, u64 length, void *buf) 2964 2128 2965 2129 { 2966 - struct ceph_osd_req_op *op; 2130 + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2967 2131 struct rbd_obj_request *obj_request; 2968 - struct ceph_osd_client *osdc; 2969 2132 struct page **pages = NULL; 2970 2133 u32 page_count; 2971 2134 size_t size; ··· 2985 2148 obj_request->pages = pages; 2986 2149 obj_request->page_count = page_count; 2987 2150 2988 - op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length); 2989 - if (!op) 2990 - goto out; 2991 - obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 2992 - obj_request, op); 2993 - rbd_osd_req_op_destroy(op); 2151 + obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); 2994 2152 if (!obj_request->osd_req) 2995 2153 goto out; 2996 2154 2997 - osdc = &rbd_dev->rbd_client->client->osdc; 2155 + osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, 2156 + offset, length, 0, 0); 2157 + osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, 2158 + obj_request->pages, 2159 + obj_request->length, 2160 + obj_request->offset & ~PAGE_MASK, 2161 + false, false); 2162 + rbd_osd_req_format_read(obj_request); 2163 + 2998 2164 ret = rbd_obj_request_submit(osdc, obj_request); 2999 2165 if (ret) 3000 2166 goto out; ··· 3012 2172 
rbd_assert(obj_request->xferred <= (u64) SIZE_MAX); 3013 2173 size = (size_t) obj_request->xferred; 3014 2174 ceph_copy_from_page_vector(pages, buf, 0, size); 3015 - rbd_assert(size <= (size_t) INT_MAX); 3016 - ret = (int) size; 3017 - if (version) 3018 - *version = obj_request->version; 2175 + rbd_assert(size <= (size_t)INT_MAX); 2176 + ret = (int)size; 3019 2177 out: 3020 2178 if (obj_request) 3021 2179 rbd_obj_request_put(obj_request); ··· 3034 2196 * Returns a pointer-coded errno if a failure occurs. 3035 2197 */ 3036 2198 static struct rbd_image_header_ondisk * 3037 - rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) 2199 + rbd_dev_v1_header_read(struct rbd_device *rbd_dev) 3038 2200 { 3039 2201 struct rbd_image_header_ondisk *ondisk = NULL; 3040 2202 u32 snap_count = 0; ··· 3062 2224 return ERR_PTR(-ENOMEM); 3063 2225 3064 2226 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 3065 - 0, size, 3066 - (char *) ondisk, version); 2227 + 0, size, ondisk); 3067 2228 if (ret < 0) 3068 2229 goto out_err; 3069 - if (WARN_ON((size_t) ret < size)) { 2230 + if ((size_t)ret < size) { 3070 2231 ret = -ENXIO; 3071 2232 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 3072 2233 size, ret); ··· 3097 2260 struct rbd_image_header *header) 3098 2261 { 3099 2262 struct rbd_image_header_ondisk *ondisk; 3100 - u64 ver = 0; 3101 2263 int ret; 3102 2264 3103 - ondisk = rbd_dev_v1_header_read(rbd_dev, &ver); 2265 + ondisk = rbd_dev_v1_header_read(rbd_dev); 3104 2266 if (IS_ERR(ondisk)) 3105 2267 return PTR_ERR(ondisk); 3106 2268 ret = rbd_header_from_disk(header, ondisk); 3107 - if (ret >= 0) 3108 - header->obj_version = ver; 3109 2269 kfree(ondisk); 3110 2270 3111 2271 return ret; 3112 2272 } 3113 2273 3114 - static void rbd_remove_all_snaps(struct rbd_device *rbd_dev) 3115 - { 3116 - struct rbd_snap *snap; 3117 - struct rbd_snap *next; 3118 - 3119 - list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) 3120 - rbd_remove_snap_dev(snap); 3121 - } 3122 
- 3123 2274 static void rbd_update_mapping_size(struct rbd_device *rbd_dev) 3124 2275 { 3125 - sector_t size; 3126 - 3127 2276 if (rbd_dev->spec->snap_id != CEPH_NOSNAP) 3128 2277 return; 3129 2278 3130 - size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE; 3131 - dout("setting size to %llu sectors", (unsigned long long) size); 3132 - rbd_dev->mapping.size = (u64) size; 3133 - set_capacity(rbd_dev->disk, size); 2279 + if (rbd_dev->mapping.size != rbd_dev->header.image_size) { 2280 + sector_t size; 2281 + 2282 + rbd_dev->mapping.size = rbd_dev->header.image_size; 2283 + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 2284 + dout("setting size to %llu sectors", (unsigned long long)size); 2285 + set_capacity(rbd_dev->disk, size); 2286 + } 3134 2287 } 3135 2288 3136 2289 /* 3137 2290 * only read the first part of the ondisk header, without the snaps info 3138 2291 */ 3139 - static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) 2292 + static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) 3140 2293 { 3141 2294 int ret; 3142 2295 struct rbd_image_header h; ··· 3147 2320 /* osd requests may still refer to snapc */ 3148 2321 ceph_put_snap_context(rbd_dev->header.snapc); 3149 2322 3150 - if (hver) 3151 - *hver = h.obj_version; 3152 - rbd_dev->header.obj_version = h.obj_version; 3153 2323 rbd_dev->header.image_size = h.image_size; 3154 2324 rbd_dev->header.snapc = h.snapc; 3155 2325 rbd_dev->header.snap_names = h.snap_names; 3156 2326 rbd_dev->header.snap_sizes = h.snap_sizes; 3157 2327 /* Free the extra copy of the object prefix */ 3158 - WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix)); 2328 + if (strcmp(rbd_dev->header.object_prefix, h.object_prefix)) 2329 + rbd_warn(rbd_dev, "object prefix changed (ignoring)"); 3159 2330 kfree(h.object_prefix); 3160 - 3161 - ret = rbd_dev_snaps_update(rbd_dev); 3162 - if (!ret) 3163 - ret = rbd_dev_snaps_register(rbd_dev); 3164 2331 3165 2332 up_write(&rbd_dev->header_rwsem); 3166 2333 3167 
2334 return ret; 3168 2335 } 3169 2336 3170 - static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver) 2337 + /* 2338 + * Clear the rbd device's EXISTS flag if the snapshot it's mapped to 2339 + * has disappeared from the (just updated) snapshot context. 2340 + */ 2341 + static void rbd_exists_validate(struct rbd_device *rbd_dev) 3171 2342 { 2343 + u64 snap_id; 2344 + 2345 + if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) 2346 + return; 2347 + 2348 + snap_id = rbd_dev->spec->snap_id; 2349 + if (snap_id == CEPH_NOSNAP) 2350 + return; 2351 + 2352 + if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX) 2353 + clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 2354 + } 2355 + 2356 + static int rbd_dev_refresh(struct rbd_device *rbd_dev) 2357 + { 2358 + u64 image_size; 3172 2359 int ret; 3173 2360 3174 2361 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 2362 + image_size = rbd_dev->header.image_size; 3175 2363 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 3176 2364 if (rbd_dev->image_format == 1) 3177 - ret = rbd_dev_v1_refresh(rbd_dev, hver); 2365 + ret = rbd_dev_v1_refresh(rbd_dev); 3178 2366 else 3179 - ret = rbd_dev_v2_refresh(rbd_dev, hver); 2367 + ret = rbd_dev_v2_refresh(rbd_dev); 2368 + 2369 + /* If it's a mapped snapshot, validate its EXISTS flag */ 2370 + 2371 + rbd_exists_validate(rbd_dev); 3180 2372 mutex_unlock(&ctl_mutex); 2373 + if (ret) 2374 + rbd_warn(rbd_dev, "got notification but failed to " 2375 + " update snaps: %d\n", ret); 2376 + if (image_size != rbd_dev->header.image_size) 2377 + revalidate_disk(rbd_dev->disk); 3181 2378 3182 2379 return ret; 3183 2380 } ··· 3245 2394 3246 2395 rbd_dev->disk = disk; 3247 2396 3248 - set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 3249 - 3250 2397 return 0; 3251 2398 out_disk: 3252 2399 put_disk(disk); ··· 3265 2416 struct device_attribute *attr, char *buf) 3266 2417 { 3267 2418 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 3268 - sector_t size; 3269 2419 
3270 - down_read(&rbd_dev->header_rwsem); 3271 - size = get_capacity(rbd_dev->disk); 3272 - up_read(&rbd_dev->header_rwsem); 3273 - 3274 - return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE); 2420 + return sprintf(buf, "%llu\n", 2421 + (unsigned long long)rbd_dev->mapping.size); 3275 2422 } 3276 2423 3277 2424 /* ··· 3280 2435 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 3281 2436 3282 2437 return sprintf(buf, "0x%016llx\n", 3283 - (unsigned long long) rbd_dev->mapping.features); 2438 + (unsigned long long)rbd_dev->mapping.features); 3284 2439 } 3285 2440 3286 2441 static ssize_t rbd_major_show(struct device *dev, ··· 3288 2443 { 3289 2444 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 3290 2445 3291 - return sprintf(buf, "%d\n", rbd_dev->major); 2446 + if (rbd_dev->major) 2447 + return sprintf(buf, "%d\n", rbd_dev->major); 2448 + 2449 + return sprintf(buf, "(none)\n"); 2450 + 3292 2451 } 3293 2452 3294 2453 static ssize_t rbd_client_id_show(struct device *dev, ··· 3318 2469 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 3319 2470 3320 2471 return sprintf(buf, "%llu\n", 3321 - (unsigned long long) rbd_dev->spec->pool_id); 2472 + (unsigned long long) rbd_dev->spec->pool_id); 3322 2473 } 3323 2474 3324 2475 static ssize_t rbd_name_show(struct device *dev, ··· 3404 2555 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 3405 2556 int ret; 3406 2557 3407 - ret = rbd_dev_refresh(rbd_dev, NULL); 2558 + ret = rbd_dev_refresh(rbd_dev); 3408 2559 3409 2560 return ret < 0 ? 
ret : size; 3410 2561 } ··· 3455 2606 .release = rbd_sysfs_dev_release, 3456 2607 }; 3457 2608 3458 - 3459 - /* 3460 - sysfs - snapshots 3461 - */ 3462 - 3463 - static ssize_t rbd_snap_size_show(struct device *dev, 3464 - struct device_attribute *attr, 3465 - char *buf) 3466 - { 3467 - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); 3468 - 3469 - return sprintf(buf, "%llu\n", (unsigned long long)snap->size); 3470 - } 3471 - 3472 - static ssize_t rbd_snap_id_show(struct device *dev, 3473 - struct device_attribute *attr, 3474 - char *buf) 3475 - { 3476 - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); 3477 - 3478 - return sprintf(buf, "%llu\n", (unsigned long long)snap->id); 3479 - } 3480 - 3481 - static ssize_t rbd_snap_features_show(struct device *dev, 3482 - struct device_attribute *attr, 3483 - char *buf) 3484 - { 3485 - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); 3486 - 3487 - return sprintf(buf, "0x%016llx\n", 3488 - (unsigned long long) snap->features); 3489 - } 3490 - 3491 - static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); 3492 - static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); 3493 - static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL); 3494 - 3495 - static struct attribute *rbd_snap_attrs[] = { 3496 - &dev_attr_snap_size.attr, 3497 - &dev_attr_snap_id.attr, 3498 - &dev_attr_snap_features.attr, 3499 - NULL, 3500 - }; 3501 - 3502 - static struct attribute_group rbd_snap_attr_group = { 3503 - .attrs = rbd_snap_attrs, 3504 - }; 3505 - 3506 - static void rbd_snap_dev_release(struct device *dev) 3507 - { 3508 - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); 3509 - kfree(snap->name); 3510 - kfree(snap); 3511 - } 3512 - 3513 - static const struct attribute_group *rbd_snap_attr_groups[] = { 3514 - &rbd_snap_attr_group, 3515 - NULL 3516 - }; 3517 - 3518 - static struct device_type rbd_snap_device_type = { 3519 - .groups = rbd_snap_attr_groups, 
3520 - .release = rbd_snap_dev_release, 3521 - }; 3522 - 3523 2609 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec) 3524 2610 { 3525 2611 kref_get(&spec->kref); ··· 3477 2693 if (!spec) 3478 2694 return NULL; 3479 2695 kref_init(&spec->kref); 3480 - 3481 - rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */ 3482 2696 3483 2697 return spec; 3484 2698 } ··· 3504 2722 spin_lock_init(&rbd_dev->lock); 3505 2723 rbd_dev->flags = 0; 3506 2724 INIT_LIST_HEAD(&rbd_dev->node); 3507 - INIT_LIST_HEAD(&rbd_dev->snaps); 3508 2725 init_rwsem(&rbd_dev->header_rwsem); 3509 2726 3510 2727 rbd_dev->spec = spec; ··· 3521 2740 3522 2741 static void rbd_dev_destroy(struct rbd_device *rbd_dev) 3523 2742 { 3524 - rbd_spec_put(rbd_dev->parent_spec); 3525 - kfree(rbd_dev->header_name); 3526 2743 rbd_put_client(rbd_dev->rbd_client); 3527 2744 rbd_spec_put(rbd_dev->spec); 3528 2745 kfree(rbd_dev); 3529 - } 3530 - 3531 - static bool rbd_snap_registered(struct rbd_snap *snap) 3532 - { 3533 - bool ret = snap->dev.type == &rbd_snap_device_type; 3534 - bool reg = device_is_registered(&snap->dev); 3535 - 3536 - rbd_assert(!ret ^ reg); 3537 - 3538 - return ret; 3539 - } 3540 - 3541 - static void rbd_remove_snap_dev(struct rbd_snap *snap) 3542 - { 3543 - list_del(&snap->node); 3544 - if (device_is_registered(&snap->dev)) 3545 - device_unregister(&snap->dev); 3546 - } 3547 - 3548 - static int rbd_register_snap_dev(struct rbd_snap *snap, 3549 - struct device *parent) 3550 - { 3551 - struct device *dev = &snap->dev; 3552 - int ret; 3553 - 3554 - dev->type = &rbd_snap_device_type; 3555 - dev->parent = parent; 3556 - dev->release = rbd_snap_dev_release; 3557 - dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name); 3558 - dout("%s: registering device for snapshot %s\n", __func__, snap->name); 3559 - 3560 - ret = device_register(dev); 3561 - 3562 - return ret; 3563 - } 3564 - 3565 - static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, 3566 - const char *snap_name, 3567 - 
u64 snap_id, u64 snap_size, 3568 - u64 snap_features) 3569 - { 3570 - struct rbd_snap *snap; 3571 - int ret; 3572 - 3573 - snap = kzalloc(sizeof (*snap), GFP_KERNEL); 3574 - if (!snap) 3575 - return ERR_PTR(-ENOMEM); 3576 - 3577 - ret = -ENOMEM; 3578 - snap->name = kstrdup(snap_name, GFP_KERNEL); 3579 - if (!snap->name) 3580 - goto err; 3581 - 3582 - snap->id = snap_id; 3583 - snap->size = snap_size; 3584 - snap->features = snap_features; 3585 - 3586 - return snap; 3587 - 3588 - err: 3589 - kfree(snap->name); 3590 - kfree(snap); 3591 - 3592 - return ERR_PTR(ret); 3593 - } 3594 - 3595 - static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, 3596 - u64 *snap_size, u64 *snap_features) 3597 - { 3598 - char *snap_name; 3599 - 3600 - rbd_assert(which < rbd_dev->header.snapc->num_snaps); 3601 - 3602 - *snap_size = rbd_dev->header.snap_sizes[which]; 3603 - *snap_features = 0; /* No features for v1 */ 3604 - 3605 - /* Skip over names until we find the one we are looking for */ 3606 - 3607 - snap_name = rbd_dev->header.snap_names; 3608 - while (which--) 3609 - snap_name += strlen(snap_name) + 1; 3610 - 3611 - return snap_name; 3612 2746 } 3613 2747 3614 2748 /* ··· 3543 2847 3544 2848 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 3545 2849 "rbd", "get_size", 3546 - (char *) &snapid, sizeof (snapid), 3547 - (char *) &size_buf, sizeof (size_buf), NULL); 2850 + &snapid, sizeof (snapid), 2851 + &size_buf, sizeof (size_buf)); 3548 2852 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3549 2853 if (ret < 0) 3550 2854 return ret; 2855 + if (ret < sizeof (size_buf)) 2856 + return -ERANGE; 3551 2857 3552 - *order = size_buf.order; 2858 + if (order) 2859 + *order = size_buf.order; 3553 2860 *snap_size = le64_to_cpu(size_buf.size); 3554 2861 3555 2862 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n", 3556 - (unsigned long long) snap_id, (unsigned int) *order, 3557 - (unsigned long long) *snap_size); 2863 + (unsigned long long)snap_id, 
(unsigned int)*order, 2864 + (unsigned long long)*snap_size); 3558 2865 3559 2866 return 0; 3560 2867 } ··· 3580 2881 return -ENOMEM; 3581 2882 3582 2883 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 3583 - "rbd", "get_object_prefix", 3584 - NULL, 0, 3585 - reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL); 2884 + "rbd", "get_object_prefix", NULL, 0, 2885 + reply_buf, RBD_OBJ_PREFIX_LEN_MAX); 3586 2886 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3587 2887 if (ret < 0) 3588 2888 goto out; 3589 2889 3590 2890 p = reply_buf; 3591 2891 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, 3592 - p + RBD_OBJ_PREFIX_LEN_MAX, 3593 - NULL, GFP_NOIO); 2892 + p + ret, NULL, GFP_NOIO); 2893 + ret = 0; 3594 2894 3595 2895 if (IS_ERR(rbd_dev->header.object_prefix)) { 3596 2896 ret = PTR_ERR(rbd_dev->header.object_prefix); ··· 3597 2899 } else { 3598 2900 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); 3599 2901 } 3600 - 3601 2902 out: 3602 2903 kfree(reply_buf); 3603 2904 ··· 3610 2913 struct { 3611 2914 __le64 features; 3612 2915 __le64 incompat; 3613 - } features_buf = { 0 }; 2916 + } __attribute__ ((packed)) features_buf = { 0 }; 3614 2917 u64 incompat; 3615 2918 int ret; 3616 2919 3617 2920 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 3618 2921 "rbd", "get_features", 3619 - (char *) &snapid, sizeof (snapid), 3620 - (char *) &features_buf, sizeof (features_buf), 3621 - NULL); 2922 + &snapid, sizeof (snapid), 2923 + &features_buf, sizeof (features_buf)); 3622 2924 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3623 2925 if (ret < 0) 3624 2926 return ret; 2927 + if (ret < sizeof (features_buf)) 2928 + return -ERANGE; 3625 2929 3626 2930 incompat = le64_to_cpu(features_buf.incompat); 3627 - if (incompat & ~RBD_FEATURES_ALL) 2931 + if (incompat & ~RBD_FEATURES_SUPPORTED) 3628 2932 return -ENXIO; 3629 2933 3630 2934 *snap_features = le64_to_cpu(features_buf.features); 3631 2935 3632 2936 dout(" snap_id 
0x%016llx features = 0x%016llx incompat = 0x%016llx\n", 3633 - (unsigned long long) snap_id, 3634 - (unsigned long long) *snap_features, 3635 - (unsigned long long) le64_to_cpu(features_buf.incompat)); 2937 + (unsigned long long)snap_id, 2938 + (unsigned long long)*snap_features, 2939 + (unsigned long long)le64_to_cpu(features_buf.incompat)); 3636 2940 3637 2941 return 0; 3638 2942 } ··· 3673 2975 snapid = cpu_to_le64(CEPH_NOSNAP); 3674 2976 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 3675 2977 "rbd", "get_parent", 3676 - (char *) &snapid, sizeof (snapid), 3677 - (char *) reply_buf, size, NULL); 2978 + &snapid, sizeof (snapid), 2979 + reply_buf, size); 3678 2980 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3679 2981 if (ret < 0) 3680 2982 goto out_err; 3681 2983 3682 - ret = -ERANGE; 3683 2984 p = reply_buf; 3684 - end = (char *) reply_buf + size; 2985 + end = reply_buf + ret; 2986 + ret = -ERANGE; 3685 2987 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); 3686 2988 if (parent_spec->pool_id == CEPH_NOPOOL) 3687 2989 goto out; /* No parent? No problem. 
*/ ··· 3689 2991 /* The ceph file layout needs to fit pool id in 32 bits */ 3690 2992 3691 2993 ret = -EIO; 3692 - if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX)) 3693 - goto out; 2994 + if (parent_spec->pool_id > (u64)U32_MAX) { 2995 + rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", 2996 + (unsigned long long)parent_spec->pool_id, U32_MAX); 2997 + goto out_err; 2998 + } 3694 2999 3695 3000 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 3696 3001 if (IS_ERR(image_id)) { ··· 3714 3013 rbd_spec_put(parent_spec); 3715 3014 3716 3015 return ret; 3016 + } 3017 + 3018 + static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) 3019 + { 3020 + struct { 3021 + __le64 stripe_unit; 3022 + __le64 stripe_count; 3023 + } __attribute__ ((packed)) striping_info_buf = { 0 }; 3024 + size_t size = sizeof (striping_info_buf); 3025 + void *p; 3026 + u64 obj_size; 3027 + u64 stripe_unit; 3028 + u64 stripe_count; 3029 + int ret; 3030 + 3031 + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 3032 + "rbd", "get_stripe_unit_count", NULL, 0, 3033 + (char *)&striping_info_buf, size); 3034 + dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3035 + if (ret < 0) 3036 + return ret; 3037 + if (ret < size) 3038 + return -ERANGE; 3039 + 3040 + /* 3041 + * We don't actually support the "fancy striping" feature 3042 + * (STRIPINGV2) yet, but if the striping sizes are the 3043 + * defaults the behavior is the same as before. So find 3044 + * out, and only fail if the image has non-default values. 
3045 + */ 3046 + ret = -EINVAL; 3047 + obj_size = (u64)1 << rbd_dev->header.obj_order; 3048 + p = &striping_info_buf; 3049 + stripe_unit = ceph_decode_64(&p); 3050 + if (stripe_unit != obj_size) { 3051 + rbd_warn(rbd_dev, "unsupported stripe unit " 3052 + "(got %llu want %llu)", 3053 + stripe_unit, obj_size); 3054 + return -EINVAL; 3055 + } 3056 + stripe_count = ceph_decode_64(&p); 3057 + if (stripe_count != 1) { 3058 + rbd_warn(rbd_dev, "unsupported stripe count " 3059 + "(got %llu want 1)", stripe_count); 3060 + return -EINVAL; 3061 + } 3062 + rbd_dev->header.stripe_unit = stripe_unit; 3063 + rbd_dev->header.stripe_count = stripe_count; 3064 + 3065 + return 0; 3717 3066 } 3718 3067 3719 3068 static char *rbd_dev_image_name(struct rbd_device *rbd_dev) ··· 3787 3036 return NULL; 3788 3037 3789 3038 p = image_id; 3790 - end = (char *) image_id + image_id_size; 3791 - ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len); 3039 + end = image_id + image_id_size; 3040 + ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len); 3792 3041 3793 3042 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX; 3794 3043 reply_buf = kmalloc(size, GFP_KERNEL); ··· 3798 3047 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY, 3799 3048 "rbd", "dir_get_name", 3800 3049 image_id, image_id_size, 3801 - (char *) reply_buf, size, NULL); 3050 + reply_buf, size); 3802 3051 if (ret < 0) 3803 3052 goto out; 3804 3053 p = reply_buf; 3805 - end = (char *) reply_buf + size; 3054 + end = reply_buf + ret; 3055 + 3806 3056 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); 3807 3057 if (IS_ERR(image_name)) 3808 3058 image_name = NULL; ··· 3816 3064 return image_name; 3817 3065 } 3818 3066 3819 - /* 3820 - * When a parent image gets probed, we only have the pool, image, 3821 - * and snapshot ids but not the names of any of them. This call 3822 - * is made later to fill in those names. 
It has to be done after 3823 - * rbd_dev_snaps_update() has completed because some of the 3824 - * information (in particular, snapshot name) is not available 3825 - * until then. 3826 - */ 3827 - static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev) 3067 + static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 3828 3068 { 3829 - struct ceph_osd_client *osdc; 3830 - const char *name; 3831 - void *reply_buf = NULL; 3069 + struct ceph_snap_context *snapc = rbd_dev->header.snapc; 3070 + const char *snap_name; 3071 + u32 which = 0; 3072 + 3073 + /* Skip over names until we find the one we are looking for */ 3074 + 3075 + snap_name = rbd_dev->header.snap_names; 3076 + while (which < snapc->num_snaps) { 3077 + if (!strcmp(name, snap_name)) 3078 + return snapc->snaps[which]; 3079 + snap_name += strlen(snap_name) + 1; 3080 + which++; 3081 + } 3082 + return CEPH_NOSNAP; 3083 + } 3084 + 3085 + static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 3086 + { 3087 + struct ceph_snap_context *snapc = rbd_dev->header.snapc; 3088 + u32 which; 3089 + bool found = false; 3090 + u64 snap_id; 3091 + 3092 + for (which = 0; !found && which < snapc->num_snaps; which++) { 3093 + const char *snap_name; 3094 + 3095 + snap_id = snapc->snaps[which]; 3096 + snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 3097 + if (IS_ERR(snap_name)) 3098 + break; 3099 + found = !strcmp(name, snap_name); 3100 + kfree(snap_name); 3101 + } 3102 + return found ? snap_id : CEPH_NOSNAP; 3103 + } 3104 + 3105 + /* 3106 + * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if 3107 + * no snapshot by that name is found, or if an error occurs. 
3108 + */ 3109 + static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 3110 + { 3111 + if (rbd_dev->image_format == 1) 3112 + return rbd_v1_snap_id_by_name(rbd_dev, name); 3113 + 3114 + return rbd_v2_snap_id_by_name(rbd_dev, name); 3115 + } 3116 + 3117 + /* 3118 + * When an rbd image has a parent image, it is identified by the 3119 + * pool, image, and snapshot ids (not names). This function fills 3120 + * in the names for those ids. (It's OK if we can't figure out the 3121 + * name for an image id, but the pool and snapshot ids should always 3122 + * exist and have names.) All names in an rbd spec are dynamically 3123 + * allocated. 3124 + * 3125 + * When an image being mapped (not a parent) is probed, we have the 3126 + * pool name and pool id, image name and image id, and the snapshot 3127 + * name. The only thing we're missing is the snapshot id. 3128 + */ 3129 + static int rbd_dev_spec_update(struct rbd_device *rbd_dev) 3130 + { 3131 + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3132 + struct rbd_spec *spec = rbd_dev->spec; 3133 + const char *pool_name; 3134 + const char *image_name; 3135 + const char *snap_name; 3832 3136 int ret; 3833 3137 3834 - if (rbd_dev->spec->pool_name) 3835 - return 0; /* Already have the names */ 3138 + /* 3139 + * An image being mapped will have the pool name (etc.), but 3140 + * we need to look up the snapshot id. 
3141 + */ 3142 + if (spec->pool_name) { 3143 + if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { 3144 + u64 snap_id; 3836 3145 3837 - /* Look up the pool name */ 3146 + snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); 3147 + if (snap_id == CEPH_NOSNAP) 3148 + return -ENOENT; 3149 + spec->snap_id = snap_id; 3150 + } else { 3151 + spec->snap_id = CEPH_NOSNAP; 3152 + } 3838 3153 3839 - osdc = &rbd_dev->rbd_client->client->osdc; 3840 - name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id); 3841 - if (!name) { 3842 - rbd_warn(rbd_dev, "there is no pool with id %llu", 3843 - rbd_dev->spec->pool_id); /* Really a BUG() */ 3844 - return -EIO; 3154 + return 0; 3845 3155 } 3846 3156 3847 - rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL); 3848 - if (!rbd_dev->spec->pool_name) 3157 + /* Get the pool name; we have to make our own copy of this */ 3158 + 3159 + pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id); 3160 + if (!pool_name) { 3161 + rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id); 3162 + return -EIO; 3163 + } 3164 + pool_name = kstrdup(pool_name, GFP_KERNEL); 3165 + if (!pool_name) 3849 3166 return -ENOMEM; 3850 3167 3851 3168 /* Fetch the image name; tolerate failure here */ 3852 3169 3853 - name = rbd_dev_image_name(rbd_dev); 3854 - if (name) 3855 - rbd_dev->spec->image_name = (char *) name; 3856 - else 3170 + image_name = rbd_dev_image_name(rbd_dev); 3171 + if (!image_name) 3857 3172 rbd_warn(rbd_dev, "unable to get image name"); 3858 3173 3859 - /* Look up the snapshot name. 
*/ 3174 + /* Look up the snapshot name, and make a copy */ 3860 3175 3861 - name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id); 3862 - if (!name) { 3863 - rbd_warn(rbd_dev, "no snapshot with id %llu", 3864 - rbd_dev->spec->snap_id); /* Really a BUG() */ 3865 - ret = -EIO; 3176 + snap_name = rbd_snap_name(rbd_dev, spec->snap_id); 3177 + if (!snap_name) { 3178 + ret = -ENOMEM; 3866 3179 goto out_err; 3867 3180 } 3868 - rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL); 3869 - if(!rbd_dev->spec->snap_name) 3870 - goto out_err; 3181 + 3182 + spec->pool_name = pool_name; 3183 + spec->image_name = image_name; 3184 + spec->snap_name = snap_name; 3871 3185 3872 3186 return 0; 3873 3187 out_err: 3874 - kfree(reply_buf); 3875 - kfree(rbd_dev->spec->pool_name); 3876 - rbd_dev->spec->pool_name = NULL; 3188 + kfree(image_name); 3189 + kfree(pool_name); 3877 3190 3878 3191 return ret; 3879 3192 } 3880 3193 3881 - static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) 3194 + static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) 3882 3195 { 3883 3196 size_t size; 3884 3197 int ret; ··· 3968 3151 return -ENOMEM; 3969 3152 3970 3153 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 3971 - "rbd", "get_snapcontext", 3972 - NULL, 0, 3973 - reply_buf, size, ver); 3154 + "rbd", "get_snapcontext", NULL, 0, 3155 + reply_buf, size); 3974 3156 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3975 3157 if (ret < 0) 3976 3158 goto out; 3977 3159 3978 - ret = -ERANGE; 3979 3160 p = reply_buf; 3980 - end = (char *) reply_buf + size; 3161 + end = reply_buf + ret; 3162 + ret = -ERANGE; 3981 3163 ceph_decode_64_safe(&p, end, seq, out); 3982 3164 ceph_decode_32_safe(&p, end, snap_count, out); 3983 3165 ··· 3993 3177 } 3994 3178 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64))) 3995 3179 goto out; 3180 + ret = 0; 3996 3181 3997 - size = sizeof (struct ceph_snap_context) + 3998 - snap_count * sizeof (snapc->snaps[0]); 3999 - snapc = 
kmalloc(size, GFP_KERNEL); 3182 + snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 4000 3183 if (!snapc) { 4001 3184 ret = -ENOMEM; 4002 3185 goto out; 4003 3186 } 4004 - 4005 - atomic_set(&snapc->nref, 1); 4006 3187 snapc->seq = seq; 4007 - snapc->num_snaps = snap_count; 4008 3188 for (i = 0; i < snap_count; i++) 4009 3189 snapc->snaps[i] = ceph_decode_64(&p); 4010 3190 4011 3191 rbd_dev->header.snapc = snapc; 4012 3192 4013 3193 dout(" snap context seq = %llu, snap_count = %u\n", 4014 - (unsigned long long) seq, (unsigned int) snap_count); 4015 - 3194 + (unsigned long long)seq, (unsigned int)snap_count); 4016 3195 out: 4017 3196 kfree(reply_buf); 4018 3197 4019 - return 0; 3198 + return ret; 4020 3199 } 4021 3200 4022 - static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) 3201 + static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 3202 + u64 snap_id) 4023 3203 { 4024 3204 size_t size; 4025 3205 void *reply_buf; 4026 - __le64 snap_id; 3206 + __le64 snapid; 4027 3207 int ret; 4028 3208 void *p; 4029 3209 void *end; ··· 4030 3218 if (!reply_buf) 4031 3219 return ERR_PTR(-ENOMEM); 4032 3220 4033 - snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]); 3221 + snapid = cpu_to_le64(snap_id); 4034 3222 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 4035 3223 "rbd", "get_snapshot_name", 4036 - (char *) &snap_id, sizeof (snap_id), 4037 - reply_buf, size, NULL); 3224 + &snapid, sizeof (snapid), 3225 + reply_buf, size); 4038 3226 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4039 - if (ret < 0) 3227 + if (ret < 0) { 3228 + snap_name = ERR_PTR(ret); 4040 3229 goto out; 3230 + } 4041 3231 4042 3232 p = reply_buf; 4043 - end = (char *) reply_buf + size; 3233 + end = reply_buf + ret; 4044 3234 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 4045 - if (IS_ERR(snap_name)) { 4046 - ret = PTR_ERR(snap_name); 3235 + if (IS_ERR(snap_name)) 4047 3236 goto out; 4048 - } else { 4049 - dout(" 
snap_id 0x%016llx snap_name = %s\n", 4050 - (unsigned long long) le64_to_cpu(snap_id), snap_name); 4051 - } 3237 + 3238 + dout(" snap_id 0x%016llx snap_name = %s\n", 3239 + (unsigned long long)snap_id, snap_name); 3240 + out: 4052 3241 kfree(reply_buf); 4053 3242 4054 3243 return snap_name; 4055 - out: 4056 - kfree(reply_buf); 4057 - 4058 - return ERR_PTR(ret); 4059 3244 } 4060 3245 4061 - static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, 4062 - u64 *snap_size, u64 *snap_features) 4063 - { 4064 - u64 snap_id; 4065 - u8 order; 4066 - int ret; 4067 - 4068 - snap_id = rbd_dev->header.snapc->snaps[which]; 4069 - ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size); 4070 - if (ret) 4071 - return ERR_PTR(ret); 4072 - ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features); 4073 - if (ret) 4074 - return ERR_PTR(ret); 4075 - 4076 - return rbd_dev_v2_snap_name(rbd_dev, which); 4077 - } 4078 - 4079 - static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, 4080 - u64 *snap_size, u64 *snap_features) 4081 - { 4082 - if (rbd_dev->image_format == 1) 4083 - return rbd_dev_v1_snap_info(rbd_dev, which, 4084 - snap_size, snap_features); 4085 - if (rbd_dev->image_format == 2) 4086 - return rbd_dev_v2_snap_info(rbd_dev, which, 4087 - snap_size, snap_features); 4088 - return ERR_PTR(-EINVAL); 4089 - } 4090 - 4091 - static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver) 3246 + static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) 4092 3247 { 4093 3248 int ret; 4094 - __u8 obj_order; 4095 3249 4096 3250 down_write(&rbd_dev->header_rwsem); 4097 3251 4098 - /* Grab old order first, to see if it changes */ 4099 - 4100 - obj_order = rbd_dev->header.obj_order, 4101 3252 ret = rbd_dev_v2_image_size(rbd_dev); 4102 3253 if (ret) 4103 3254 goto out; 4104 - if (rbd_dev->header.obj_order != obj_order) { 4105 - ret = -EIO; 4106 - goto out; 4107 - } 4108 3255 rbd_update_mapping_size(rbd_dev); 4109 3256 4110 - ret = 
rbd_dev_v2_snap_context(rbd_dev, hver); 3257 + ret = rbd_dev_v2_snap_context(rbd_dev); 4111 3258 dout("rbd_dev_v2_snap_context returned %d\n", ret); 4112 3259 if (ret) 4113 3260 goto out; 4114 - ret = rbd_dev_snaps_update(rbd_dev); 4115 - dout("rbd_dev_snaps_update returned %d\n", ret); 4116 - if (ret) 4117 - goto out; 4118 - ret = rbd_dev_snaps_register(rbd_dev); 4119 - dout("rbd_dev_snaps_register returned %d\n", ret); 4120 3261 out: 4121 3262 up_write(&rbd_dev->header_rwsem); 4122 - 4123 - return ret; 4124 - } 4125 - 4126 - /* 4127 - * Scan the rbd device's current snapshot list and compare it to the 4128 - * newly-received snapshot context. Remove any existing snapshots 4129 - * not present in the new snapshot context. Add a new snapshot for 4130 - * any snaphots in the snapshot context not in the current list. 4131 - * And verify there are no changes to snapshots we already know 4132 - * about. 4133 - * 4134 - * Assumes the snapshots in the snapshot context are sorted by 4135 - * snapshot id, highest id first. (Snapshots in the rbd_dev's list 4136 - * are also maintained in that order.) 4137 - */ 4138 - static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) 4139 - { 4140 - struct ceph_snap_context *snapc = rbd_dev->header.snapc; 4141 - const u32 snap_count = snapc->num_snaps; 4142 - struct list_head *head = &rbd_dev->snaps; 4143 - struct list_head *links = head->next; 4144 - u32 index = 0; 4145 - 4146 - dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count); 4147 - while (index < snap_count || links != head) { 4148 - u64 snap_id; 4149 - struct rbd_snap *snap; 4150 - char *snap_name; 4151 - u64 snap_size = 0; 4152 - u64 snap_features = 0; 4153 - 4154 - snap_id = index < snap_count ? snapc->snaps[index] 4155 - : CEPH_NOSNAP; 4156 - snap = links != head ? 
list_entry(links, struct rbd_snap, node) 4157 - : NULL; 4158 - rbd_assert(!snap || snap->id != CEPH_NOSNAP); 4159 - 4160 - if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) { 4161 - struct list_head *next = links->next; 4162 - 4163 - /* 4164 - * A previously-existing snapshot is not in 4165 - * the new snap context. 4166 - * 4167 - * If the now missing snapshot is the one the 4168 - * image is mapped to, clear its exists flag 4169 - * so we can avoid sending any more requests 4170 - * to it. 4171 - */ 4172 - if (rbd_dev->spec->snap_id == snap->id) 4173 - clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4174 - rbd_remove_snap_dev(snap); 4175 - dout("%ssnap id %llu has been removed\n", 4176 - rbd_dev->spec->snap_id == snap->id ? 4177 - "mapped " : "", 4178 - (unsigned long long) snap->id); 4179 - 4180 - /* Done with this list entry; advance */ 4181 - 4182 - links = next; 4183 - continue; 4184 - } 4185 - 4186 - snap_name = rbd_dev_snap_info(rbd_dev, index, 4187 - &snap_size, &snap_features); 4188 - if (IS_ERR(snap_name)) 4189 - return PTR_ERR(snap_name); 4190 - 4191 - dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count, 4192 - (unsigned long long) snap_id); 4193 - if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) { 4194 - struct rbd_snap *new_snap; 4195 - 4196 - /* We haven't seen this snapshot before */ 4197 - 4198 - new_snap = __rbd_add_snap_dev(rbd_dev, snap_name, 4199 - snap_id, snap_size, snap_features); 4200 - if (IS_ERR(new_snap)) { 4201 - int err = PTR_ERR(new_snap); 4202 - 4203 - dout(" failed to add dev, error %d\n", err); 4204 - 4205 - return err; 4206 - } 4207 - 4208 - /* New goes before existing, or at end of list */ 4209 - 4210 - dout(" added dev%s\n", snap ? 
"" : " at end\n"); 4211 - if (snap) 4212 - list_add_tail(&new_snap->node, &snap->node); 4213 - else 4214 - list_add_tail(&new_snap->node, head); 4215 - } else { 4216 - /* Already have this one */ 4217 - 4218 - dout(" already present\n"); 4219 - 4220 - rbd_assert(snap->size == snap_size); 4221 - rbd_assert(!strcmp(snap->name, snap_name)); 4222 - rbd_assert(snap->features == snap_features); 4223 - 4224 - /* Done with this list entry; advance */ 4225 - 4226 - links = links->next; 4227 - } 4228 - 4229 - /* Advance to the next entry in the snapshot context */ 4230 - 4231 - index++; 4232 - } 4233 - dout("%s: done\n", __func__); 4234 - 4235 - return 0; 4236 - } 4237 - 4238 - /* 4239 - * Scan the list of snapshots and register the devices for any that 4240 - * have not already been registered. 4241 - */ 4242 - static int rbd_dev_snaps_register(struct rbd_device *rbd_dev) 4243 - { 4244 - struct rbd_snap *snap; 4245 - int ret = 0; 4246 - 4247 - dout("%s:\n", __func__); 4248 - if (WARN_ON(!device_is_registered(&rbd_dev->dev))) 4249 - return -EIO; 4250 - 4251 - list_for_each_entry(snap, &rbd_dev->snaps, node) { 4252 - if (!rbd_snap_registered(snap)) { 4253 - ret = rbd_register_snap_dev(snap, &rbd_dev->dev); 4254 - if (ret < 0) 4255 - break; 4256 - } 4257 - } 4258 - dout("%s: returning %d\n", __func__, ret); 4259 3263 4260 3264 return ret; 4261 3265 } ··· 4087 3459 dev->bus = &rbd_bus_type; 4088 3460 dev->type = &rbd_device_type; 4089 3461 dev->parent = &rbd_root_dev; 4090 - dev->release = rbd_dev_release; 3462 + dev->release = rbd_dev_device_release; 4091 3463 dev_set_name(dev, "%d", rbd_dev->dev_id); 4092 3464 ret = device_register(dev); 4093 3465 ··· 4301 3673 size_t len; 4302 3674 char *options; 4303 3675 const char *mon_addrs; 3676 + char *snap_name; 4304 3677 size_t mon_addrs_size; 4305 3678 struct rbd_spec *spec = NULL; 4306 3679 struct rbd_options *rbd_opts = NULL; ··· 4360 3731 ret = -ENAMETOOLONG; 4361 3732 goto out_err; 4362 3733 } 4363 - spec->snap_name = 
kmemdup(buf, len + 1, GFP_KERNEL); 4364 - if (!spec->snap_name) 3734 + snap_name = kmemdup(buf, len + 1, GFP_KERNEL); 3735 + if (!snap_name) 4365 3736 goto out_mem; 4366 - *(spec->snap_name + len) = '\0'; 3737 + *(snap_name + len) = '\0'; 3738 + spec->snap_name = snap_name; 4367 3739 4368 3740 /* Initialize all rbd options to the defaults */ 4369 3741 ··· 4418 3788 size_t size; 4419 3789 char *object_name; 4420 3790 void *response; 4421 - void *p; 3791 + char *image_id; 4422 3792 4423 3793 /* 4424 3794 * When probing a parent image, the image id is already 4425 3795 * known (and the image name likely is not). There's no 4426 - * need to fetch the image id again in this case. 3796 + * need to fetch the image id again in this case. We 3797 + * do still need to set the image format though. 4427 3798 */ 4428 - if (rbd_dev->spec->image_id) 3799 + if (rbd_dev->spec->image_id) { 3800 + rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1; 3801 + 4429 3802 return 0; 3803 + } 4430 3804 4431 3805 /* 4432 3806 * First, see if the format 2 image id file exists, and if ··· 4452 3818 goto out; 4453 3819 } 4454 3820 4455 - ret = rbd_obj_method_sync(rbd_dev, object_name, 4456 - "rbd", "get_id", 4457 - NULL, 0, 4458 - response, RBD_IMAGE_ID_LEN_MAX, NULL); 4459 - dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4460 - if (ret < 0) 4461 - goto out; 3821 + /* If it doesn't exist we'll assume it's a format 1 image */ 4462 3822 4463 - p = response; 4464 - rbd_dev->spec->image_id = ceph_extract_encoded_string(&p, 4465 - p + RBD_IMAGE_ID_LEN_MAX, 3823 + ret = rbd_obj_method_sync(rbd_dev, object_name, 3824 + "rbd", "get_id", NULL, 0, 3825 + response, RBD_IMAGE_ID_LEN_MAX); 3826 + dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 3827 + if (ret == -ENOENT) { 3828 + image_id = kstrdup("", GFP_KERNEL); 3829 + ret = image_id ? 
0 : -ENOMEM; 3830 + if (!ret) 3831 + rbd_dev->image_format = 1; 3832 + } else if (ret > sizeof (__le32)) { 3833 + void *p = response; 3834 + 3835 + image_id = ceph_extract_encoded_string(&p, p + ret, 4466 3836 NULL, GFP_NOIO); 4467 - if (IS_ERR(rbd_dev->spec->image_id)) { 4468 - ret = PTR_ERR(rbd_dev->spec->image_id); 4469 - rbd_dev->spec->image_id = NULL; 3837 + ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0; 3838 + if (!ret) 3839 + rbd_dev->image_format = 2; 4470 3840 } else { 4471 - dout("image_id is %s\n", rbd_dev->spec->image_id); 3841 + ret = -EINVAL; 3842 + } 3843 + 3844 + if (!ret) { 3845 + rbd_dev->spec->image_id = image_id; 3846 + dout("image_id is %s\n", image_id); 4472 3847 } 4473 3848 out: 4474 3849 kfree(response); ··· 4486 3843 return ret; 4487 3844 } 4488 3845 3846 + /* Undo whatever state changes are made by v1 or v2 image probe */ 3847 + 3848 + static void rbd_dev_unprobe(struct rbd_device *rbd_dev) 3849 + { 3850 + struct rbd_image_header *header; 3851 + 3852 + rbd_dev_remove_parent(rbd_dev); 3853 + rbd_spec_put(rbd_dev->parent_spec); 3854 + rbd_dev->parent_spec = NULL; 3855 + rbd_dev->parent_overlap = 0; 3856 + 3857 + /* Free dynamic fields from the header, then zero it out */ 3858 + 3859 + header = &rbd_dev->header; 3860 + ceph_put_snap_context(header->snapc); 3861 + kfree(header->snap_sizes); 3862 + kfree(header->snap_names); 3863 + kfree(header->object_prefix); 3864 + memset(header, 0, sizeof (*header)); 3865 + } 3866 + 4489 3867 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) 4490 3868 { 4491 3869 int ret; 4492 - size_t size; 4493 - 4494 - /* Version 1 images have no id; empty string is used */ 4495 - 4496 - rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL); 4497 - if (!rbd_dev->spec->image_id) 4498 - return -ENOMEM; 4499 - 4500 - /* Record the header object name for this rbd image. 
*/ 4501 - 4502 - size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX); 4503 - rbd_dev->header_name = kmalloc(size, GFP_KERNEL); 4504 - if (!rbd_dev->header_name) { 4505 - ret = -ENOMEM; 4506 - goto out_err; 4507 - } 4508 - sprintf(rbd_dev->header_name, "%s%s", 4509 - rbd_dev->spec->image_name, RBD_SUFFIX); 4510 3870 4511 3871 /* Populate rbd image metadata */ 4512 3872 ··· 4521 3875 4522 3876 rbd_dev->parent_spec = NULL; 4523 3877 rbd_dev->parent_overlap = 0; 4524 - 4525 - rbd_dev->image_format = 1; 4526 3878 4527 3879 dout("discovered version 1 image, header name is %s\n", 4528 3880 rbd_dev->header_name); ··· 4538 3894 4539 3895 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) 4540 3896 { 4541 - size_t size; 4542 3897 int ret; 4543 - u64 ver = 0; 4544 - 4545 - /* 4546 - * Image id was filled in by the caller. Record the header 4547 - * object name for this rbd image. 4548 - */ 4549 - size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id); 4550 - rbd_dev->header_name = kmalloc(size, GFP_KERNEL); 4551 - if (!rbd_dev->header_name) 4552 - return -ENOMEM; 4553 - sprintf(rbd_dev->header_name, "%s%s", 4554 - RBD_HEADER_PREFIX, rbd_dev->spec->image_id); 4555 - 4556 - /* Get the size and object order for the image */ 4557 3898 4558 3899 ret = rbd_dev_v2_image_size(rbd_dev); 4559 - if (ret < 0) 3900 + if (ret) 4560 3901 goto out_err; 4561 3902 4562 3903 /* Get the object prefix (a.k.a. 
block_name) for the image */ 4563 3904 4564 3905 ret = rbd_dev_v2_object_prefix(rbd_dev); 4565 - if (ret < 0) 3906 + if (ret) 4566 3907 goto out_err; 4567 3908 4568 3909 /* Get the and check features for the image */ 4569 3910 4570 3911 ret = rbd_dev_v2_features(rbd_dev); 4571 - if (ret < 0) 3912 + if (ret) 4572 3913 goto out_err; 4573 3914 4574 3915 /* If the image supports layering, get the parent info */ 4575 3916 4576 3917 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { 4577 3918 ret = rbd_dev_v2_parent_info(rbd_dev); 3919 + if (ret) 3920 + goto out_err; 3921 + 3922 + /* 3923 + * Don't print a warning for parent images. We can 3924 + * tell this point because we won't know its pool 3925 + * name yet (just its pool id). 3926 + */ 3927 + if (rbd_dev->spec->pool_name) 3928 + rbd_warn(rbd_dev, "WARNING: kernel layering " 3929 + "is EXPERIMENTAL!"); 3930 + } 3931 + 3932 + /* If the image supports fancy striping, get its parameters */ 3933 + 3934 + if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { 3935 + ret = rbd_dev_v2_striping_info(rbd_dev); 4578 3936 if (ret < 0) 4579 3937 goto out_err; 4580 3938 } ··· 4588 3942 4589 3943 /* Get the snapshot context, plus the header version */ 4590 3944 4591 - ret = rbd_dev_v2_snap_context(rbd_dev, &ver); 3945 + ret = rbd_dev_v2_snap_context(rbd_dev); 4592 3946 if (ret) 4593 3947 goto out_err; 4594 - rbd_dev->header.obj_version = ver; 4595 - 4596 - rbd_dev->image_format = 2; 4597 3948 4598 3949 dout("discovered version 2 image, header name is %s\n", 4599 3950 rbd_dev->header_name); ··· 4608 3965 return ret; 4609 3966 } 4610 3967 4611 - static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) 3968 + static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) 3969 + { 3970 + struct rbd_device *parent = NULL; 3971 + struct rbd_spec *parent_spec; 3972 + struct rbd_client *rbdc; 3973 + int ret; 3974 + 3975 + if (!rbd_dev->parent_spec) 3976 + return 0; 3977 + /* 3978 + * We need to pass a reference to the client and 
the parent 3979 + * spec when creating the parent rbd_dev. Images related by 3980 + * parent/child relationships always share both. 3981 + */ 3982 + parent_spec = rbd_spec_get(rbd_dev->parent_spec); 3983 + rbdc = __rbd_get_client(rbd_dev->rbd_client); 3984 + 3985 + ret = -ENOMEM; 3986 + parent = rbd_dev_create(rbdc, parent_spec); 3987 + if (!parent) 3988 + goto out_err; 3989 + 3990 + ret = rbd_dev_image_probe(parent); 3991 + if (ret < 0) 3992 + goto out_err; 3993 + rbd_dev->parent = parent; 3994 + 3995 + return 0; 3996 + out_err: 3997 + if (parent) { 3998 + rbd_spec_put(rbd_dev->parent_spec); 3999 + kfree(rbd_dev->header_name); 4000 + rbd_dev_destroy(parent); 4001 + } else { 4002 + rbd_put_client(rbdc); 4003 + rbd_spec_put(parent_spec); 4004 + } 4005 + 4006 + return ret; 4007 + } 4008 + 4009 + static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 4612 4010 { 4613 4011 int ret; 4614 4012 4615 - /* no need to lock here, as rbd_dev is not registered yet */ 4616 - ret = rbd_dev_snaps_update(rbd_dev); 4013 + ret = rbd_dev_mapping_set(rbd_dev); 4617 4014 if (ret) 4618 4015 return ret; 4619 - 4620 - ret = rbd_dev_probe_update_spec(rbd_dev); 4621 - if (ret) 4622 - goto err_out_snaps; 4623 - 4624 - ret = rbd_dev_set_mapping(rbd_dev); 4625 - if (ret) 4626 - goto err_out_snaps; 4627 4016 4628 4017 /* generate unique id: find highest unique id, add one */ 4629 4018 rbd_dev_id_get(rbd_dev); ··· 4682 4007 if (ret) 4683 4008 goto err_out_disk; 4684 4009 4685 - /* 4686 - * At this point cleanup in the event of an error is the job 4687 - * of the sysfs code (initiated by rbd_bus_del_dev()). 4688 - */ 4689 - down_write(&rbd_dev->header_rwsem); 4690 - ret = rbd_dev_snaps_register(rbd_dev); 4691 - up_write(&rbd_dev->header_rwsem); 4692 - if (ret) 4693 - goto err_out_bus; 4694 - 4695 - ret = rbd_dev_header_watch_sync(rbd_dev, 1); 4696 - if (ret) 4697 - goto err_out_bus; 4698 - 4699 4010 /* Everything's ready. Announce the disk to the world. 
*/ 4700 4011 4012 + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 4013 + set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4701 4014 add_disk(rbd_dev->disk); 4702 4015 4703 4016 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, 4704 4017 (unsigned long long) rbd_dev->mapping.size); 4705 4018 4706 4019 return ret; 4707 - err_out_bus: 4708 - /* this will also clean up rest of rbd_dev stuff */ 4709 4020 4710 - rbd_bus_del_dev(rbd_dev); 4711 - 4712 - return ret; 4713 4021 err_out_disk: 4714 4022 rbd_free_disk(rbd_dev); 4715 4023 err_out_blkdev: 4716 4024 unregister_blkdev(rbd_dev->major, rbd_dev->name); 4717 4025 err_out_id: 4718 4026 rbd_dev_id_put(rbd_dev); 4719 - err_out_snaps: 4720 - rbd_remove_all_snaps(rbd_dev); 4027 + rbd_dev_mapping_clear(rbd_dev); 4721 4028 4722 4029 return ret; 4030 + } 4031 + 4032 + static int rbd_dev_header_name(struct rbd_device *rbd_dev) 4033 + { 4034 + struct rbd_spec *spec = rbd_dev->spec; 4035 + size_t size; 4036 + 4037 + /* Record the header object name for this rbd image. 
*/ 4038 + 4039 + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 4040 + 4041 + if (rbd_dev->image_format == 1) 4042 + size = strlen(spec->image_name) + sizeof (RBD_SUFFIX); 4043 + else 4044 + size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id); 4045 + 4046 + rbd_dev->header_name = kmalloc(size, GFP_KERNEL); 4047 + if (!rbd_dev->header_name) 4048 + return -ENOMEM; 4049 + 4050 + if (rbd_dev->image_format == 1) 4051 + sprintf(rbd_dev->header_name, "%s%s", 4052 + spec->image_name, RBD_SUFFIX); 4053 + else 4054 + sprintf(rbd_dev->header_name, "%s%s", 4055 + RBD_HEADER_PREFIX, spec->image_id); 4056 + return 0; 4057 + } 4058 + 4059 + static void rbd_dev_image_release(struct rbd_device *rbd_dev) 4060 + { 4061 + int ret; 4062 + 4063 + rbd_dev_unprobe(rbd_dev); 4064 + ret = rbd_dev_header_watch_sync(rbd_dev, 0); 4065 + if (ret) 4066 + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); 4067 + kfree(rbd_dev->header_name); 4068 + rbd_dev->header_name = NULL; 4069 + rbd_dev->image_format = 0; 4070 + kfree(rbd_dev->spec->image_id); 4071 + rbd_dev->spec->image_id = NULL; 4072 + 4073 + rbd_dev_destroy(rbd_dev); 4723 4074 } 4724 4075 4725 4076 /* ··· 4753 4052 * device. For format 2 images this includes determining the image 4754 4053 * id. 4755 4054 */ 4756 - static int rbd_dev_probe(struct rbd_device *rbd_dev) 4055 + static int rbd_dev_image_probe(struct rbd_device *rbd_dev) 4757 4056 { 4758 4057 int ret; 4058 + int tmp; 4759 4059 4760 4060 /* 4761 4061 * Get the id from the image id object. 
If it's not a ··· 4765 4063 */ 4766 4064 ret = rbd_dev_image_id(rbd_dev); 4767 4065 if (ret) 4066 + return ret; 4067 + rbd_assert(rbd_dev->spec->image_id); 4068 + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 4069 + 4070 + ret = rbd_dev_header_name(rbd_dev); 4071 + if (ret) 4072 + goto err_out_format; 4073 + 4074 + ret = rbd_dev_header_watch_sync(rbd_dev, 1); 4075 + if (ret) 4076 + goto out_header_name; 4077 + 4078 + if (rbd_dev->image_format == 1) 4768 4079 ret = rbd_dev_v1_probe(rbd_dev); 4769 4080 else 4770 4081 ret = rbd_dev_v2_probe(rbd_dev); 4771 - if (ret) { 4772 - dout("probe failed, returning %d\n", ret); 4773 - 4774 - return ret; 4775 - } 4776 - 4777 - ret = rbd_dev_probe_finish(rbd_dev); 4778 4082 if (ret) 4779 - rbd_header_free(&rbd_dev->header); 4083 + goto err_out_watch; 4084 + 4085 + ret = rbd_dev_spec_update(rbd_dev); 4086 + if (ret) 4087 + goto err_out_probe; 4088 + 4089 + ret = rbd_dev_probe_parent(rbd_dev); 4090 + if (!ret) 4091 + return 0; 4092 + 4093 + err_out_probe: 4094 + rbd_dev_unprobe(rbd_dev); 4095 + err_out_watch: 4096 + tmp = rbd_dev_header_watch_sync(rbd_dev, 0); 4097 + if (tmp) 4098 + rbd_warn(rbd_dev, "unable to tear down watch request\n"); 4099 + out_header_name: 4100 + kfree(rbd_dev->header_name); 4101 + rbd_dev->header_name = NULL; 4102 + err_out_format: 4103 + rbd_dev->image_format = 0; 4104 + kfree(rbd_dev->spec->image_id); 4105 + rbd_dev->spec->image_id = NULL; 4106 + 4107 + dout("probe failed, returning %d\n", ret); 4780 4108 4781 4109 return ret; 4782 4110 } ··· 4843 4111 rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name); 4844 4112 if (rc < 0) 4845 4113 goto err_out_client; 4846 - spec->pool_id = (u64) rc; 4114 + spec->pool_id = (u64)rc; 4847 4115 4848 4116 /* The ceph file layout needs to fit pool id in 32 bits */ 4849 4117 4850 - if (WARN_ON(spec->pool_id > (u64) U32_MAX)) { 4118 + if (spec->pool_id > (u64)U32_MAX) { 4119 + rbd_warn(NULL, "pool id too large (%llu > %u)\n", 4120 + (unsigned long 
long)spec->pool_id, U32_MAX); 4851 4121 rc = -EIO; 4852 4122 goto err_out_client; 4853 4123 } ··· 4864 4130 kfree(rbd_opts); 4865 4131 rbd_opts = NULL; /* done with this */ 4866 4132 4867 - rc = rbd_dev_probe(rbd_dev); 4133 + rc = rbd_dev_image_probe(rbd_dev); 4868 4134 if (rc < 0) 4869 4135 goto err_out_rbd_dev; 4870 4136 4871 - return count; 4137 + rc = rbd_dev_device_setup(rbd_dev); 4138 + if (!rc) 4139 + return count; 4140 + 4141 + rbd_dev_image_release(rbd_dev); 4872 4142 err_out_rbd_dev: 4873 4143 rbd_dev_destroy(rbd_dev); 4874 4144 err_out_client: ··· 4887 4149 4888 4150 dout("Error adding device %s\n", buf); 4889 4151 4890 - return (ssize_t) rc; 4152 + return (ssize_t)rc; 4891 4153 } 4892 4154 4893 4155 static struct rbd_device *__rbd_get_dev(unsigned long dev_id) ··· 4907 4169 return NULL; 4908 4170 } 4909 4171 4910 - static void rbd_dev_release(struct device *dev) 4172 + static void rbd_dev_device_release(struct device *dev) 4911 4173 { 4912 4174 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4913 4175 4914 - if (rbd_dev->watch_event) 4915 - rbd_dev_header_watch_sync(rbd_dev, 0); 4916 - 4917 - /* clean up and free blkdev */ 4918 4176 rbd_free_disk(rbd_dev); 4177 + clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4178 + rbd_dev_clear_mapping(rbd_dev); 4919 4179 unregister_blkdev(rbd_dev->major, rbd_dev->name); 4920 - 4921 - /* release allocated disk header fields */ 4922 - rbd_header_free(&rbd_dev->header); 4923 - 4924 - /* done with the id, and with the rbd_dev */ 4180 + rbd_dev->major = 0; 4925 4181 rbd_dev_id_put(rbd_dev); 4926 - rbd_assert(rbd_dev->rbd_client != NULL); 4927 - rbd_dev_destroy(rbd_dev); 4182 + rbd_dev_mapping_clear(rbd_dev); 4183 + } 4928 4184 4929 - /* release module ref */ 4930 - module_put(THIS_MODULE); 4185 + static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) 4186 + { 4187 + while (rbd_dev->parent) { 4188 + struct rbd_device *first = rbd_dev; 4189 + struct rbd_device *second = first->parent; 4190 + struct rbd_device 
*third; 4191 + 4192 + /* 4193 + * Follow to the parent with no grandparent and 4194 + * remove it. 4195 + */ 4196 + while (second && (third = second->parent)) { 4197 + first = second; 4198 + second = third; 4199 + } 4200 + rbd_assert(second); 4201 + rbd_dev_image_release(second); 4202 + first->parent = NULL; 4203 + first->parent_overlap = 0; 4204 + 4205 + rbd_assert(first->parent_spec); 4206 + rbd_spec_put(first->parent_spec); 4207 + first->parent_spec = NULL; 4208 + } 4931 4209 } 4932 4210 4933 4211 static ssize_t rbd_remove(struct bus_type *bus, ··· 4951 4197 size_t count) 4952 4198 { 4953 4199 struct rbd_device *rbd_dev = NULL; 4954 - int target_id, rc; 4200 + int target_id; 4955 4201 unsigned long ul; 4956 - int ret = count; 4202 + int ret; 4957 4203 4958 - rc = strict_strtoul(buf, 10, &ul); 4959 - if (rc) 4960 - return rc; 4204 + ret = strict_strtoul(buf, 10, &ul); 4205 + if (ret) 4206 + return ret; 4961 4207 4962 4208 /* convert to int; abort if we lost anything in the conversion */ 4963 4209 target_id = (int) ul; ··· 4980 4226 spin_unlock_irq(&rbd_dev->lock); 4981 4227 if (ret < 0) 4982 4228 goto done; 4983 - 4984 - rbd_remove_all_snaps(rbd_dev); 4229 + ret = count; 4985 4230 rbd_bus_del_dev(rbd_dev); 4986 - 4231 + rbd_dev_image_release(rbd_dev); 4232 + module_put(THIS_MODULE); 4987 4233 done: 4988 4234 mutex_unlock(&ctl_mutex); 4989 4235 ··· 5015 4261 device_unregister(&rbd_root_dev); 5016 4262 } 5017 4263 4264 + static int rbd_slab_init(void) 4265 + { 4266 + rbd_assert(!rbd_img_request_cache); 4267 + rbd_img_request_cache = kmem_cache_create("rbd_img_request", 4268 + sizeof (struct rbd_img_request), 4269 + __alignof__(struct rbd_img_request), 4270 + 0, NULL); 4271 + if (!rbd_img_request_cache) 4272 + return -ENOMEM; 4273 + 4274 + rbd_assert(!rbd_obj_request_cache); 4275 + rbd_obj_request_cache = kmem_cache_create("rbd_obj_request", 4276 + sizeof (struct rbd_obj_request), 4277 + __alignof__(struct rbd_obj_request), 4278 + 0, NULL); 4279 + if 
(!rbd_obj_request_cache) 4280 + goto out_err; 4281 + 4282 + rbd_assert(!rbd_segment_name_cache); 4283 + rbd_segment_name_cache = kmem_cache_create("rbd_segment_name", 4284 + MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL); 4285 + if (rbd_segment_name_cache) 4286 + return 0; 4287 + out_err: 4288 + if (rbd_obj_request_cache) { 4289 + kmem_cache_destroy(rbd_obj_request_cache); 4290 + rbd_obj_request_cache = NULL; 4291 + } 4292 + 4293 + kmem_cache_destroy(rbd_img_request_cache); 4294 + rbd_img_request_cache = NULL; 4295 + 4296 + return -ENOMEM; 4297 + } 4298 + 4299 + static void rbd_slab_exit(void) 4300 + { 4301 + rbd_assert(rbd_segment_name_cache); 4302 + kmem_cache_destroy(rbd_segment_name_cache); 4303 + rbd_segment_name_cache = NULL; 4304 + 4305 + rbd_assert(rbd_obj_request_cache); 4306 + kmem_cache_destroy(rbd_obj_request_cache); 4307 + rbd_obj_request_cache = NULL; 4308 + 4309 + rbd_assert(rbd_img_request_cache); 4310 + kmem_cache_destroy(rbd_img_request_cache); 4311 + rbd_img_request_cache = NULL; 4312 + } 4313 + 5018 4314 static int __init rbd_init(void) 5019 4315 { 5020 4316 int rc; ··· 5074 4270 5075 4271 return -EINVAL; 5076 4272 } 5077 - rc = rbd_sysfs_init(); 4273 + rc = rbd_slab_init(); 5078 4274 if (rc) 5079 4275 return rc; 5080 - pr_info("loaded " RBD_DRV_NAME_LONG "\n"); 5081 - return 0; 4276 + rc = rbd_sysfs_init(); 4277 + if (rc) 4278 + rbd_slab_exit(); 4279 + else 4280 + pr_info("loaded " RBD_DRV_NAME_LONG "\n"); 4281 + 4282 + return rc; 5082 4283 } 5083 4284 5084 4285 static void __exit rbd_exit(void) 5085 4286 { 5086 4287 rbd_sysfs_cleanup(); 4288 + rbd_slab_exit(); 5087 4289 } 5088 4290 5089 4291 module_init(rbd_init);
+108 -114
fs/ceph/addr.c
··· 236 236 static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) 237 237 { 238 238 struct inode *inode = req->r_inode; 239 + struct ceph_osd_data *osd_data; 239 240 int rc = req->r_result; 240 241 int bytes = le32_to_cpu(msg->hdr.data_len); 242 + int num_pages; 241 243 int i; 242 244 243 245 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); 244 246 245 247 /* unlock all pages, zeroing any data we didn't read */ 246 - for (i = 0; i < req->r_num_pages; i++, bytes -= PAGE_CACHE_SIZE) { 247 - struct page *page = req->r_pages[i]; 248 + osd_data = osd_req_op_extent_osd_data(req, 0); 249 + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 250 + num_pages = calc_pages_for((u64)osd_data->alignment, 251 + (u64)osd_data->length); 252 + for (i = 0; i < num_pages; i++) { 253 + struct page *page = osd_data->pages[i]; 248 254 249 255 if (bytes < (int)PAGE_CACHE_SIZE) { 250 256 /* zero (remainder of) page */ ··· 263 257 SetPageUptodate(page); 264 258 unlock_page(page); 265 259 page_cache_release(page); 260 + bytes -= PAGE_CACHE_SIZE; 266 261 } 267 - kfree(req->r_pages); 262 + kfree(osd_data->pages); 268 263 } 269 264 270 265 static void ceph_unlock_page_vector(struct page **pages, int num_pages) ··· 286 279 &ceph_inode_to_client(inode)->client->osdc; 287 280 struct ceph_inode_info *ci = ceph_inode(inode); 288 281 struct page *page = list_entry(page_list->prev, struct page, lru); 282 + struct ceph_vino vino; 289 283 struct ceph_osd_request *req; 290 284 u64 off; 291 285 u64 len; ··· 311 303 len = nr_pages << PAGE_CACHE_SHIFT; 312 304 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 313 305 off, len); 314 - 315 - req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), 316 - off, &len, 317 - CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 318 - NULL, 0, 306 + vino = ceph_vino(inode); 307 + req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 308 + 1, CEPH_OSD_OP_READ, 309 + CEPH_OSD_FLAG_READ, NULL, 319 310 
ci->i_truncate_seq, ci->i_truncate_size, 320 - NULL, false, 0); 311 + false); 321 312 if (IS_ERR(req)) 322 313 return PTR_ERR(req); 323 314 324 315 /* build page vector */ 325 - nr_pages = len >> PAGE_CACHE_SHIFT; 316 + nr_pages = calc_pages_for(0, len); 326 317 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); 327 318 ret = -ENOMEM; 328 319 if (!pages) ··· 343 336 } 344 337 pages[i] = page; 345 338 } 346 - req->r_pages = pages; 347 - req->r_num_pages = nr_pages; 339 + osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); 348 340 req->r_callback = finish_read; 349 341 req->r_inode = inode; 342 + 343 + ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); 350 344 351 345 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 352 346 ret = ceph_osdc_start_request(osdc, req, false); ··· 381 373 max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) 382 374 >> PAGE_SHIFT; 383 375 384 - dout("readpages %p file %p nr_pages %d max %d\n", inode, file, nr_pages, 376 + dout("readpages %p file %p nr_pages %d max %d\n", inode, 377 + file, nr_pages, 385 378 max); 386 379 while (!list_empty(page_list)) { 387 380 rc = start_read(inode, page_list, max); ··· 557 548 { 558 549 struct inode *inode = req->r_inode; 559 550 struct ceph_inode_info *ci = ceph_inode(inode); 551 + struct ceph_osd_data *osd_data; 560 552 unsigned wrote; 561 553 struct page *page; 554 + int num_pages; 562 555 int i; 563 556 struct ceph_snap_context *snapc = req->r_snapc; 564 557 struct address_space *mapping = inode->i_mapping; 565 558 int rc = req->r_result; 566 - u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length); 559 + u64 bytes = req->r_ops[0].extent.length; 567 560 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 568 561 long writeback_stat; 569 562 unsigned issued = ceph_caps_issued(ci); 570 563 564 + osd_data = osd_req_op_extent_osd_data(req, 0); 565 + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 566 + num_pages = 
calc_pages_for((u64)osd_data->alignment, 567 + (u64)osd_data->length); 571 568 if (rc >= 0) { 572 569 /* 573 570 * Assume we wrote the pages we originally sent. The ··· 581 566 * raced with a truncation and was adjusted at the osd, 582 567 * so don't believe the reply. 583 568 */ 584 - wrote = req->r_num_pages; 569 + wrote = num_pages; 585 570 } else { 586 571 wrote = 0; 587 572 mapping_set_error(mapping, rc); ··· 590 575 inode, rc, bytes, wrote); 591 576 592 577 /* clean all pages */ 593 - for (i = 0; i < req->r_num_pages; i++) { 594 - page = req->r_pages[i]; 578 + for (i = 0; i < num_pages; i++) { 579 + page = osd_data->pages[i]; 595 580 BUG_ON(!page); 596 581 WARN_ON(!PageUptodate(page)); 597 582 ··· 620 605 unlock_page(page); 621 606 } 622 607 dout("%p wrote+cleaned %d pages\n", inode, wrote); 623 - ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc); 608 + ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); 624 609 625 - ceph_release_pages(req->r_pages, req->r_num_pages); 626 - if (req->r_pages_from_pool) 627 - mempool_free(req->r_pages, 610 + ceph_release_pages(osd_data->pages, num_pages); 611 + if (osd_data->pages_from_pool) 612 + mempool_free(osd_data->pages, 628 613 ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); 629 614 else 630 - kfree(req->r_pages); 615 + kfree(osd_data->pages); 631 616 ceph_osdc_put_request(req); 632 617 } 633 618 634 - /* 635 - * allocate a page vec, either directly, or if necessary, via a the 636 - * mempool. we avoid the mempool if we can because req->r_num_pages 637 - * may be less than the maximum write size. 
638 - */ 639 - static void alloc_page_vec(struct ceph_fs_client *fsc, 640 - struct ceph_osd_request *req) 619 + static struct ceph_osd_request * 620 + ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len, 621 + struct ceph_snap_context *snapc, int num_ops) 641 622 { 642 - req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages, 643 - GFP_NOFS); 644 - if (!req->r_pages) { 645 - req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS); 646 - req->r_pages_from_pool = 1; 647 - WARN_ON(!req->r_pages); 648 - } 623 + struct ceph_fs_client *fsc; 624 + struct ceph_inode_info *ci; 625 + struct ceph_vino vino; 626 + 627 + fsc = ceph_inode_to_client(inode); 628 + ci = ceph_inode(inode); 629 + vino = ceph_vino(inode); 630 + /* BUG_ON(vino.snap != CEPH_NOSNAP); */ 631 + 632 + return ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 633 + vino, offset, len, num_ops, CEPH_OSD_OP_WRITE, 634 + CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK, 635 + snapc, ci->i_truncate_seq, ci->i_truncate_size, true); 649 636 } 650 637 651 638 /* ··· 670 653 unsigned wsize = 1 << inode->i_blkbits; 671 654 struct ceph_osd_request *req = NULL; 672 655 int do_sync; 673 - u64 snap_size = 0; 656 + u64 snap_size; 674 657 675 658 /* 676 659 * Include a 'sync' in the OSD request if this is a data ··· 716 699 retry: 717 700 /* find oldest snap context with dirty data */ 718 701 ceph_put_snap_context(snapc); 702 + snap_size = 0; 719 703 snapc = get_oldest_context(inode, &snap_size); 720 704 if (!snapc) { 721 705 /* hmm, why does writepages get called when there ··· 724 706 dout(" no snap context with dirty data?\n"); 725 707 goto out; 726 708 } 709 + if (snap_size == 0) 710 + snap_size = i_size_read(inode); 727 711 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 728 712 snapc, snapc->seq, snapc->num_snaps); 729 713 if (last_snapc && snapc != last_snapc) { ··· 738 718 last_snapc = snapc; 739 719 740 720 while (!done && index <= end) { 721 + int num_ops = do_sync ? 
2 : 1; 722 + struct ceph_vino vino; 741 723 unsigned i; 742 724 int first; 743 725 pgoff_t next; 744 726 int pvec_pages, locked_pages; 727 + struct page **pages = NULL; 728 + mempool_t *pool = NULL; /* Becomes non-null if mempool used */ 745 729 struct page *page; 746 730 int want; 747 731 u64 offset, len; ··· 797 773 dout("waiting on writeback %p\n", page); 798 774 wait_on_page_writeback(page); 799 775 } 800 - if ((snap_size && page_offset(page) > snap_size) || 801 - (!snap_size && 802 - page_offset(page) > i_size_read(inode))) { 803 - dout("%p page eof %llu\n", page, snap_size ? 804 - snap_size : i_size_read(inode)); 776 + if (page_offset(page) >= snap_size) { 777 + dout("%p page eof %llu\n", page, snap_size); 805 778 done = 1; 806 779 unlock_page(page); 807 780 break; ··· 826 805 break; 827 806 } 828 807 829 - /* ok */ 808 + /* 809 + * We have something to write. If this is 810 + * the first locked page this time through, 811 + * allocate an osd request and a page array 812 + * that it will use. 
813 + */ 830 814 if (locked_pages == 0) { 815 + size_t size; 816 + 817 + BUG_ON(pages); 818 + 831 819 /* prepare async write request */ 832 - offset = (u64) page_offset(page); 820 + offset = (u64)page_offset(page); 833 821 len = wsize; 834 - req = ceph_osdc_new_request(&fsc->client->osdc, 835 - &ci->i_layout, 836 - ceph_vino(inode), 837 - offset, &len, 838 - CEPH_OSD_OP_WRITE, 839 - CEPH_OSD_FLAG_WRITE | 840 - CEPH_OSD_FLAG_ONDISK, 841 - snapc, do_sync, 842 - ci->i_truncate_seq, 843 - ci->i_truncate_size, 844 - &inode->i_mtime, true, 0); 822 + req = ceph_writepages_osd_request(inode, 823 + offset, &len, snapc, 824 + num_ops); 845 825 846 826 if (IS_ERR(req)) { 847 827 rc = PTR_ERR(req); ··· 850 828 break; 851 829 } 852 830 853 - max_pages = req->r_num_pages; 854 - 855 - alloc_page_vec(fsc, req); 856 831 req->r_callback = writepages_finish; 857 832 req->r_inode = inode; 833 + 834 + max_pages = calc_pages_for(0, (u64)len); 835 + size = max_pages * sizeof (*pages); 836 + pages = kmalloc(size, GFP_NOFS); 837 + if (!pages) { 838 + pool = fsc->wb_pagevec_pool; 839 + pages = mempool_alloc(pool, GFP_NOFS); 840 + BUG_ON(!pages); 841 + } 858 842 } 859 843 860 844 /* note position of first page in pvec */ ··· 878 850 } 879 851 880 852 set_page_writeback(page); 881 - req->r_pages[locked_pages] = page; 853 + pages[locked_pages] = page; 882 854 locked_pages++; 883 855 next = page->index + 1; 884 856 } ··· 907 879 pvec.nr -= i-first; 908 880 } 909 881 910 - /* submit the write */ 911 - offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT; 912 - len = min((snap_size ? 
snap_size : i_size_read(inode)) - offset, 882 + /* Format the osd request message and submit the write */ 883 + 884 + offset = page_offset(pages[0]); 885 + len = min(snap_size - offset, 913 886 (u64)locked_pages << PAGE_CACHE_SHIFT); 914 887 dout("writepages got %d pages at %llu~%llu\n", 915 888 locked_pages, offset, len); 916 889 917 - /* revise final length, page count */ 918 - req->r_num_pages = locked_pages; 919 - req->r_request_ops[0].extent.length = cpu_to_le64(len); 920 - req->r_request_ops[0].payload_len = cpu_to_le32(len); 921 - req->r_request->hdr.data_len = cpu_to_le32(len); 890 + osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 891 + !!pool, false); 892 + 893 + pages = NULL; /* request message now owns the pages array */ 894 + pool = NULL; 895 + 896 + /* Update the write op length in case we changed it */ 897 + 898 + osd_req_op_extent_update(req, 0, len); 899 + 900 + vino = ceph_vino(inode); 901 + ceph_osdc_build_request(req, offset, snapc, vino.snap, 902 + &inode->i_mtime); 922 903 923 904 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 924 905 BUG_ON(rc); ··· 1104 1067 struct page **pagep, void **fsdata) 1105 1068 { 1106 1069 struct inode *inode = file_inode(file); 1107 - struct ceph_inode_info *ci = ceph_inode(inode); 1108 - struct ceph_file_info *fi = file->private_data; 1109 1070 struct page *page; 1110 1071 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1111 - int r, want, got = 0; 1112 - 1113 - if (fi->fmode & CEPH_FILE_MODE_LAZY) 1114 - want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1115 - else 1116 - want = CEPH_CAP_FILE_BUFFER; 1117 - 1118 - dout("write_begin %p %llx.%llx %llu~%u getting caps. 
i_size %llu\n", 1119 - inode, ceph_vinop(inode), pos, len, inode->i_size); 1120 - r = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos+len); 1121 - if (r < 0) 1122 - return r; 1123 - dout("write_begin %p %llx.%llx %llu~%u got cap refs on %s\n", 1124 - inode, ceph_vinop(inode), pos, len, ceph_cap_string(got)); 1125 - if (!(got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO))) { 1126 - ceph_put_cap_refs(ci, got); 1127 - return -EAGAIN; 1128 - } 1072 + int r; 1129 1073 1130 1074 do { 1131 1075 /* get a page */ 1132 1076 page = grab_cache_page_write_begin(mapping, index, 0); 1133 - if (!page) { 1134 - r = -ENOMEM; 1135 - break; 1136 - } 1077 + if (!page) 1078 + return -ENOMEM; 1079 + *pagep = page; 1137 1080 1138 1081 dout("write_begin file %p inode %p page %p %d~%d\n", file, 1139 1082 inode, page, (int)pos, (int)len); 1140 1083 1141 1084 r = ceph_update_writeable_page(file, pos, len, page); 1142 - if (r) 1143 - page_cache_release(page); 1144 1085 } while (r == -EAGAIN); 1145 1086 1146 - if (r) { 1147 - ceph_put_cap_refs(ci, got); 1148 - } else { 1149 - *pagep = page; 1150 - *(int *)fsdata = got; 1151 - } 1152 1087 return r; 1153 1088 } 1154 1089 ··· 1134 1125 struct page *page, void *fsdata) 1135 1126 { 1136 1127 struct inode *inode = file_inode(file); 1137 - struct ceph_inode_info *ci = ceph_inode(inode); 1138 1128 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1139 1129 struct ceph_mds_client *mdsc = fsc->mdsc; 1140 1130 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1141 1131 int check_cap = 0; 1142 - int got = (unsigned long)fsdata; 1143 1132 1144 1133 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1145 1134 inode, page, (int)pos, (int)copied, (int)len); ··· 1159 1152 unlock_page(page); 1160 1153 up_read(&mdsc->snap_rwsem); 1161 1154 page_cache_release(page); 1162 - 1163 - if (copied > 0) { 1164 - int dirty; 1165 - spin_lock(&ci->i_ceph_lock); 1166 - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); 1167 - 
spin_unlock(&ci->i_ceph_lock); 1168 - if (dirty) 1169 - __mark_inode_dirty(inode, dirty); 1170 - } 1171 - 1172 - dout("write_end %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1173 - inode, ceph_vinop(inode), pos, len, ceph_cap_string(got)); 1174 - ceph_put_cap_refs(ci, got); 1175 1155 1176 1156 if (check_cap) 1177 1157 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
+20 -13
fs/ceph/caps.c
··· 490 490 ci->i_rdcache_gen++; 491 491 492 492 /* 493 - * if we are newly issued FILE_SHARED, clear D_COMPLETE; we 493 + * if we are newly issued FILE_SHARED, mark dir not complete; we 494 494 * don't know what happened to this directory while we didn't 495 495 * have the cap. 496 496 */ 497 497 if ((issued & CEPH_CAP_FILE_SHARED) && 498 498 (had & CEPH_CAP_FILE_SHARED) == 0) { 499 499 ci->i_shared_gen++; 500 - if (S_ISDIR(ci->vfs_inode.i_mode)) 501 - ceph_dir_clear_complete(&ci->vfs_inode); 500 + if (S_ISDIR(ci->vfs_inode.i_mode)) { 501 + dout(" marking %p NOT complete\n", &ci->vfs_inode); 502 + __ceph_dir_clear_complete(ci); 503 + } 502 504 } 503 505 } 504 506 ··· 555 553 cap->implemented = 0; 556 554 cap->mds = mds; 557 555 cap->mds_wanted = 0; 556 + cap->mseq = 0; 558 557 559 558 cap->ci = ci; 560 559 __insert_cap_node(ci, cap); ··· 631 628 cap->cap_id = cap_id; 632 629 cap->issued = issued; 633 630 cap->implemented |= issued; 634 - cap->mds_wanted |= wanted; 631 + if (mseq > cap->mseq) 632 + cap->mds_wanted = wanted; 633 + else 634 + cap->mds_wanted |= wanted; 635 635 cap->seq = seq; 636 636 cap->issue_seq = seq; 637 637 cap->mseq = mseq; ··· 1003 997 return 0; 1004 998 } 1005 999 1006 - static void __queue_cap_release(struct ceph_mds_session *session, 1007 - u64 ino, u64 cap_id, u32 migrate_seq, 1008 - u32 issue_seq) 1000 + void __queue_cap_release(struct ceph_mds_session *session, 1001 + u64 ino, u64 cap_id, u32 migrate_seq, 1002 + u32 issue_seq) 1009 1003 { 1010 1004 struct ceph_msg *msg; 1011 1005 struct ceph_mds_cap_release *head; ··· 2052 2046 goto out; 2053 2047 } 2054 2048 2049 + /* finish pending truncate */ 2050 + while (ci->i_truncate_pending) { 2051 + spin_unlock(&ci->i_ceph_lock); 2052 + __ceph_do_pending_vmtruncate(inode, !(need & CEPH_CAP_FILE_WR)); 2053 + spin_lock(&ci->i_ceph_lock); 2054 + } 2055 + 2055 2056 if (need & CEPH_CAP_FILE_WR) { 2056 2057 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { 2057 2058 dout("get_cap_refs %p endoff 
%llu > maxsize %llu\n", ··· 2079 2066 } 2080 2067 } 2081 2068 have = __ceph_caps_issued(ci, &implemented); 2082 - 2083 - /* 2084 - * disallow writes while a truncate is pending 2085 - */ 2086 - if (ci->i_truncate_pending) 2087 - have &= ~CEPH_CAP_FILE_WR; 2088 2069 2089 2070 if ((have & need) == need) { 2090 2071 /*
+13 -52
fs/ceph/dir.c
··· 107 107 * falling back to a "normal" sync readdir if any dentries in the dir 108 108 * are dropped. 109 109 * 110 - * D_COMPLETE tells indicates we have all dentries in the dir. It is 110 + * Complete dir indicates that we have all dentries in the dir. It is 111 111 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by 112 112 * the MDS if/when the directory is modified). 113 113 */ ··· 198 198 filp->f_pos++; 199 199 200 200 /* make sure a dentry wasn't dropped while we didn't have parent lock */ 201 - if (!ceph_dir_test_complete(dir)) { 202 - dout(" lost D_COMPLETE on %p; falling back to mds\n", dir); 201 + if (!ceph_dir_is_complete(dir)) { 202 + dout(" lost dir complete on %p; falling back to mds\n", dir); 203 203 err = -EAGAIN; 204 204 goto out; 205 205 } ··· 258 258 if (filp->f_pos == 0) { 259 259 /* note dir version at start of readdir so we can tell 260 260 * if any dentries get dropped */ 261 - fi->dir_release_count = ci->i_release_count; 261 + fi->dir_release_count = atomic_read(&ci->i_release_count); 262 262 263 263 dout("readdir off 0 -> '.'\n"); 264 264 if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), ··· 284 284 if ((filp->f_pos == 2 || fi->dentry) && 285 285 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && 286 286 ceph_snap(inode) != CEPH_SNAPDIR && 287 - ceph_dir_test_complete(inode) && 287 + __ceph_dir_is_complete(ci) && 288 288 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { 289 289 spin_unlock(&ci->i_ceph_lock); 290 290 err = __dcache_readdir(filp, dirent, filldir); ··· 350 350 351 351 if (!req->r_did_prepopulate) { 352 352 dout("readdir !did_prepopulate"); 353 - fi->dir_release_count--; /* preclude D_COMPLETE */ 353 + /* preclude from marking dir complete */ 354 + fi->dir_release_count--; 354 355 } 355 356 356 357 /* note next offset and last dentry name */ ··· 429 428 * the complete dir contents in our cache. 
430 429 */ 431 430 spin_lock(&ci->i_ceph_lock); 432 - if (ci->i_release_count == fi->dir_release_count) { 433 - ceph_dir_set_complete(inode); 431 + if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { 432 + dout(" marking %p complete\n", inode); 433 + __ceph_dir_set_complete(ci, fi->dir_release_count); 434 434 ci->i_max_offset = filp->f_pos; 435 435 } 436 436 spin_unlock(&ci->i_ceph_lock); ··· 606 604 fsc->mount_options->snapdir_name, 607 605 dentry->d_name.len) && 608 606 !is_root_ceph_dentry(dir, dentry) && 609 - ceph_dir_test_complete(dir) && 607 + __ceph_dir_is_complete(ci) && 610 608 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { 611 609 spin_unlock(&ci->i_ceph_lock); 612 610 dout(" dir %p complete, -ENOENT\n", dir); ··· 1067 1065 } 1068 1066 1069 1067 /* 1070 - * Set/clear/test dir complete flag on the dir's dentry. 1071 - */ 1072 - void ceph_dir_set_complete(struct inode *inode) 1073 - { 1074 - struct dentry *dentry = d_find_any_alias(inode); 1075 - 1076 - if (dentry && ceph_dentry(dentry) && 1077 - ceph_test_mount_opt(ceph_sb_to_client(dentry->d_sb), DCACHE)) { 1078 - dout(" marking %p (%p) complete\n", inode, dentry); 1079 - set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); 1080 - } 1081 - dput(dentry); 1082 - } 1083 - 1084 - void ceph_dir_clear_complete(struct inode *inode) 1085 - { 1086 - struct dentry *dentry = d_find_any_alias(inode); 1087 - 1088 - if (dentry && ceph_dentry(dentry)) { 1089 - dout(" marking %p (%p) complete\n", inode, dentry); 1090 - set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); 1091 - } 1092 - dput(dentry); 1093 - } 1094 - 1095 - bool ceph_dir_test_complete(struct inode *inode) 1096 - { 1097 - struct dentry *dentry = d_find_any_alias(inode); 1098 - 1099 - if (dentry && ceph_dentry(dentry)) { 1100 - dout(" marking %p (%p) NOT complete\n", inode, dentry); 1101 - clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); 1102 - } 1103 - dput(dentry); 1104 - return false; 1105 - } 1106 - 1107 - /* 1108 
1068 * When the VFS prunes a dentry from the cache, we need to clear the 1109 1069 * complete flag on the parent directory. 1110 1070 * ··· 1074 1110 */ 1075 1111 static void ceph_d_prune(struct dentry *dentry) 1076 1112 { 1077 - struct ceph_dentry_info *di; 1078 - 1079 1113 dout("ceph_d_prune %p\n", dentry); 1080 1114 1081 1115 /* do we have a valid parent? */ 1082 1116 if (IS_ROOT(dentry)) 1083 1117 return; 1084 1118 1085 - /* if we are not hashed, we don't affect D_COMPLETE */ 1119 + /* if we are not hashed, we don't affect dir's completeness */ 1086 1120 if (d_unhashed(dentry)) 1087 1121 return; 1088 1122 ··· 1088 1126 * we hold d_lock, so d_parent is stable, and d_fsdata is never 1089 1127 * cleared until d_release 1090 1128 */ 1091 - di = ceph_dentry(dentry->d_parent); 1092 - clear_bit(CEPH_D_COMPLETE, &di->flags); 1129 + ceph_dir_clear_complete(dentry->d_parent->d_inode); 1093 1130 } 1094 1131 1095 1132 /*
+139 -112
fs/ceph/file.c
··· 446 446 } 447 447 448 448 /* 449 - * Write commit callback, called if we requested both an ACK and 450 - * ONDISK commit reply from the OSD. 449 + * Write commit request unsafe callback, called to tell us when a 450 + * request is unsafe (that is, in flight--has been handed to the 451 + * messenger to send to its target osd). It is called again when 452 + * we've received a response message indicating the request is 453 + * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request 454 + * is completed early (and unsuccessfully) due to a timeout or 455 + * interrupt. 456 + * 457 + * This is used if we requested both an ACK and ONDISK commit reply 458 + * from the OSD. 451 459 */ 452 - static void sync_write_commit(struct ceph_osd_request *req, 453 - struct ceph_msg *msg) 460 + static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) 454 461 { 455 462 struct ceph_inode_info *ci = ceph_inode(req->r_inode); 456 463 457 - dout("sync_write_commit %p tid %llu\n", req, req->r_tid); 458 - spin_lock(&ci->i_unsafe_lock); 459 - list_del_init(&req->r_unsafe_item); 460 - spin_unlock(&ci->i_unsafe_lock); 461 - ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); 464 + dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid, 465 + unsafe ? "un" : ""); 466 + if (unsafe) { 467 + ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 468 + spin_lock(&ci->i_unsafe_lock); 469 + list_add_tail(&req->r_unsafe_item, 470 + &ci->i_unsafe_writes); 471 + spin_unlock(&ci->i_unsafe_lock); 472 + } else { 473 + spin_lock(&ci->i_unsafe_lock); 474 + list_del_init(&req->r_unsafe_item); 475 + spin_unlock(&ci->i_unsafe_lock); 476 + ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); 477 + } 462 478 } 463 479 464 480 /* ··· 486 470 * objects, rollback on failure, etc.) 
487 471 */ 488 472 static ssize_t ceph_sync_write(struct file *file, const char __user *data, 489 - size_t left, loff_t *offset) 473 + size_t left, loff_t pos, loff_t *ppos) 490 474 { 491 475 struct inode *inode = file_inode(file); 492 476 struct ceph_inode_info *ci = ceph_inode(inode); 493 477 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 478 + struct ceph_snap_context *snapc; 479 + struct ceph_vino vino; 494 480 struct ceph_osd_request *req; 481 + int num_ops = 1; 495 482 struct page **pages; 496 483 int num_pages; 497 - long long unsigned pos; 498 484 u64 len; 499 485 int written = 0; 500 486 int flags; 501 - int do_sync = 0; 502 487 int check_caps = 0; 503 488 int page_align, io_align; 504 489 unsigned long buf_align; 505 490 int ret; 506 491 struct timespec mtime = CURRENT_TIME; 492 + bool own_pages = false; 507 493 508 494 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 509 495 return -EROFS; 510 496 511 - dout("sync_write on file %p %lld~%u %s\n", file, *offset, 497 + dout("sync_write on file %p %lld~%u %s\n", file, pos, 512 498 (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 513 - 514 - if (file->f_flags & O_APPEND) 515 - pos = i_size_read(inode); 516 - else 517 - pos = *offset; 518 499 519 500 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); 520 501 if (ret < 0) ··· 529 516 if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0) 530 517 flags |= CEPH_OSD_FLAG_ACK; 531 518 else 532 - do_sync = 1; 519 + num_ops++; /* Also include a 'startsync' command. 
*/ 533 520 534 521 /* 535 522 * we may need to do multiple writes here if we span an object ··· 539 526 io_align = pos & ~PAGE_MASK; 540 527 buf_align = (unsigned long)data & ~PAGE_MASK; 541 528 len = left; 542 - if (file->f_flags & O_DIRECT) { 543 - /* write from beginning of first page, regardless of 544 - io alignment */ 545 - page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 546 - num_pages = calc_pages_for((unsigned long)data, len); 547 - } else { 548 - page_align = pos & ~PAGE_MASK; 549 - num_pages = calc_pages_for(pos, len); 550 - } 529 + 530 + snapc = ci->i_snap_realm->cached_context; 531 + vino = ceph_vino(inode); 551 532 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 552 - ceph_vino(inode), pos, &len, 553 - CEPH_OSD_OP_WRITE, flags, 554 - ci->i_snap_realm->cached_context, 555 - do_sync, 533 + vino, pos, &len, num_ops, 534 + CEPH_OSD_OP_WRITE, flags, snapc, 556 535 ci->i_truncate_seq, ci->i_truncate_size, 557 - &mtime, false, page_align); 536 + false); 558 537 if (IS_ERR(req)) 559 538 return PTR_ERR(req); 560 539 540 + /* write from beginning of first page, regardless of io alignment */ 541 + page_align = file->f_flags & O_DIRECT ? 
buf_align : io_align; 542 + num_pages = calc_pages_for(page_align, len); 561 543 if (file->f_flags & O_DIRECT) { 562 544 pages = ceph_get_direct_page_vector(data, num_pages, false); 563 545 if (IS_ERR(pages)) { ··· 580 572 581 573 if ((file->f_flags & O_SYNC) == 0) { 582 574 /* get a second commit callback */ 583 - req->r_safe_callback = sync_write_commit; 584 - req->r_own_pages = 1; 575 + req->r_unsafe_callback = ceph_sync_write_unsafe; 576 + req->r_inode = inode; 577 + own_pages = true; 585 578 } 586 579 } 587 - req->r_pages = pages; 588 - req->r_num_pages = num_pages; 589 - req->r_inode = inode; 580 + osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, 581 + false, own_pages); 582 + 583 + /* BUG_ON(vino.snap != CEPH_NOSNAP); */ 584 + ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); 590 585 591 586 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 592 - if (!ret) { 593 - if (req->r_safe_callback) { 594 - /* 595 - * Add to inode unsafe list only after we 596 - * start_request so that a tid has been assigned. 
597 - */ 598 - spin_lock(&ci->i_unsafe_lock); 599 - list_add_tail(&req->r_unsafe_item, 600 - &ci->i_unsafe_writes); 601 - spin_unlock(&ci->i_unsafe_lock); 602 - ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 603 - } 604 - 587 + if (!ret) 605 588 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 606 - if (ret < 0 && req->r_safe_callback) { 607 - spin_lock(&ci->i_unsafe_lock); 608 - list_del_init(&req->r_unsafe_item); 609 - spin_unlock(&ci->i_unsafe_lock); 610 - ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); 611 - } 612 - } 613 589 614 590 if (file->f_flags & O_DIRECT) 615 591 ceph_put_page_vector(pages, num_pages, false); ··· 606 614 pos += len; 607 615 written += len; 608 616 left -= len; 609 - data += written; 617 + data += len; 610 618 if (left) 611 619 goto more; 612 620 613 621 ret = written; 614 - *offset = pos; 622 + *ppos = pos; 615 623 if (pos > i_size_read(inode)) 616 624 check_caps = ceph_inode_set_size(inode, pos); 617 625 if (check_caps) ··· 645 653 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 646 654 inode, ceph_vinop(inode), pos, (unsigned)len, inode); 647 655 again: 648 - __ceph_do_pending_vmtruncate(inode); 649 656 if (fi->fmode & CEPH_FILE_MODE_LAZY) 650 657 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 651 658 else ··· 708 717 struct ceph_inode_info *ci = ceph_inode(inode); 709 718 struct ceph_osd_client *osdc = 710 719 &ceph_sb_to_client(inode->i_sb)->client->osdc; 711 - loff_t endoff = pos + iov->iov_len; 712 - int got = 0; 713 - int ret, err, written; 720 + ssize_t count, written = 0; 721 + int err, want, got; 722 + bool hold_mutex; 714 723 715 724 if (ceph_snap(inode) != CEPH_NOSNAP) 716 725 return -EROFS; 717 726 718 - retry_snap: 719 - written = 0; 720 - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) 721 - return -ENOSPC; 722 - __ceph_do_pending_vmtruncate(inode); 727 + sb_start_write(inode->i_sb); 728 + mutex_lock(&inode->i_mutex); 729 + hold_mutex = true; 723 730 724 - /* 725 - * try to do a buffered write. 
if we don't have sufficient 726 - * caps, we'll get -EAGAIN from generic_file_aio_write, or a 727 - * short write if we only get caps for some pages. 728 - */ 729 - if (!(iocb->ki_filp->f_flags & O_DIRECT) && 730 - !(inode->i_sb->s_flags & MS_SYNCHRONOUS) && 731 - !(fi->flags & CEPH_F_SYNC)) { 732 - ret = generic_file_aio_write(iocb, iov, nr_segs, pos); 733 - if (ret >= 0) 734 - written = ret; 735 - 736 - if ((ret >= 0 || ret == -EIOCBQUEUED) && 737 - ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) 738 - || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { 739 - err = vfs_fsync_range(file, pos, pos + written - 1, 1); 740 - if (err < 0) 741 - ret = err; 742 - } 743 - if ((ret < 0 && ret != -EAGAIN) || pos + written >= endoff) 744 - goto out; 745 - } 746 - 747 - dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n", 748 - inode, ceph_vinop(inode), pos + written, 749 - (unsigned)iov->iov_len - written, inode->i_size); 750 - ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, 0, &got, endoff); 751 - if (ret < 0) 731 + err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ); 732 + if (err) 752 733 goto out; 753 734 754 - dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n", 755 - inode, ceph_vinop(inode), pos + written, 756 - (unsigned)iov->iov_len - written, ceph_cap_string(got)); 757 - ret = ceph_sync_write(file, iov->iov_base + written, 758 - iov->iov_len - written, &iocb->ki_pos); 759 - if (ret >= 0) { 735 + /* We can write back this queue in page reclaim */ 736 + current->backing_dev_info = file->f_mapping->backing_dev_info; 737 + 738 + err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 739 + if (err) 740 + goto out; 741 + 742 + if (count == 0) 743 + goto out; 744 + 745 + err = file_remove_suid(file); 746 + if (err) 747 + goto out; 748 + 749 + err = file_update_time(file); 750 + if (err) 751 + goto out; 752 + 753 + retry_snap: 754 + if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { 755 + err = -ENOSPC; 
756 + goto out; 757 + } 758 + 759 + dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 760 + inode, ceph_vinop(inode), pos, count, inode->i_size); 761 + if (fi->fmode & CEPH_FILE_MODE_LAZY) 762 + want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 763 + else 764 + want = CEPH_CAP_FILE_BUFFER; 765 + got = 0; 766 + err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count); 767 + if (err < 0) 768 + goto out; 769 + 770 + dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 771 + inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 772 + 773 + if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 774 + (iocb->ki_filp->f_flags & O_DIRECT) || 775 + (inode->i_sb->s_flags & MS_SYNCHRONOUS) || 776 + (fi->flags & CEPH_F_SYNC)) { 777 + mutex_unlock(&inode->i_mutex); 778 + written = ceph_sync_write(file, iov->iov_base, count, 779 + pos, &iocb->ki_pos); 780 + } else { 781 + written = generic_file_buffered_write(iocb, iov, nr_segs, 782 + pos, &iocb->ki_pos, 783 + count, 0); 784 + mutex_unlock(&inode->i_mutex); 785 + } 786 + hold_mutex = false; 787 + 788 + if (written >= 0) { 760 789 int dirty; 761 790 spin_lock(&ci->i_ceph_lock); 762 791 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); ··· 784 773 if (dirty) 785 774 __mark_inode_dirty(inode, dirty); 786 775 } 776 + 787 777 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 788 - inode, ceph_vinop(inode), pos + written, 789 - (unsigned)iov->iov_len - written, ceph_cap_string(got)); 778 + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, 779 + ceph_cap_string(got)); 790 780 ceph_put_cap_refs(ci, got); 791 - out: 792 - if (ret == -EOLDSNAPC) { 793 - dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n", 794 - inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len); 795 - goto retry_snap; 781 + 782 + if (written >= 0 && 783 + ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || 784 + ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { 
785 + err = vfs_fsync_range(file, pos, pos + written - 1, 1); 786 + if (err < 0) 787 + written = err; 796 788 } 797 789 798 - return ret; 790 + if (written == -EOLDSNAPC) { 791 + dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n", 792 + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len); 793 + mutex_lock(&inode->i_mutex); 794 + hold_mutex = true; 795 + goto retry_snap; 796 + } 797 + out: 798 + if (hold_mutex) 799 + mutex_unlock(&inode->i_mutex); 800 + sb_end_write(inode->i_sb); 801 + current->backing_dev_info = NULL; 802 + 803 + return written ? written : err; 799 804 } 800 805 801 806 /* ··· 823 796 int ret; 824 797 825 798 mutex_lock(&inode->i_mutex); 826 - __ceph_do_pending_vmtruncate(inode); 799 + __ceph_do_pending_vmtruncate(inode, false); 827 800 828 801 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 829 802 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
+31 -28
fs/ceph/inode.c
··· 302 302 ci->i_version = 0; 303 303 ci->i_time_warp_seq = 0; 304 304 ci->i_ceph_flags = 0; 305 - ci->i_release_count = 0; 305 + atomic_set(&ci->i_release_count, 1); 306 + atomic_set(&ci->i_complete_count, 0); 306 307 ci->i_symlink = NULL; 307 308 308 309 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); ··· 562 561 struct ceph_inode_info *ci = ceph_inode(inode); 563 562 int i; 564 563 int issued = 0, implemented; 565 - int updating_inode = 0; 566 564 struct timespec mtime, atime, ctime; 567 565 u32 nsplits; 568 566 struct ceph_buffer *xattr_blob = NULL; ··· 601 601 (ci->i_version & ~1) >= le64_to_cpu(info->version)) 602 602 goto no_change; 603 603 604 - updating_inode = 1; 605 604 issued = __ceph_caps_issued(ci, &implemented); 606 605 issued |= implemented | __ceph_caps_dirty(ci); 607 606 ··· 716 717 ceph_vinop(inode), inode->i_mode); 717 718 } 718 719 720 + /* set dir completion flag? */ 721 + if (S_ISDIR(inode->i_mode) && 722 + ci->i_files == 0 && ci->i_subdirs == 0 && 723 + ceph_snap(inode) == CEPH_NOSNAP && 724 + (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && 725 + (issued & CEPH_CAP_FILE_EXCL) == 0 && 726 + !__ceph_dir_is_complete(ci)) { 727 + dout(" marking %p complete (empty)\n", inode); 728 + __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count)); 729 + ci->i_max_offset = 2; 730 + } 719 731 no_change: 720 732 spin_unlock(&ci->i_ceph_lock); 721 733 ··· 775 765 pr_warning("mds issued no caps on %llx.%llx\n", 776 766 ceph_vinop(inode)); 777 767 __ceph_get_fmode(ci, cap_fmode); 778 - } 779 - 780 - /* set dir completion flag? 
*/ 781 - if (S_ISDIR(inode->i_mode) && 782 - updating_inode && /* didn't jump to no_change */ 783 - ci->i_files == 0 && ci->i_subdirs == 0 && 784 - ceph_snap(inode) == CEPH_NOSNAP && 785 - (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && 786 - (issued & CEPH_CAP_FILE_EXCL) == 0 && 787 - !ceph_dir_test_complete(inode)) { 788 - dout(" marking %p complete (empty)\n", inode); 789 - ceph_dir_set_complete(inode); 790 - ci->i_max_offset = 2; 791 768 } 792 769 793 770 /* update delegation info? */ ··· 858 861 di = ceph_dentry(dn); 859 862 860 863 spin_lock(&ci->i_ceph_lock); 861 - if (!ceph_dir_test_complete(inode)) { 864 + if (!__ceph_dir_is_complete(ci)) { 862 865 spin_unlock(&ci->i_ceph_lock); 863 866 return; 864 867 } ··· 1062 1065 /* 1063 1066 * d_move() puts the renamed dentry at the end of 1064 1067 * d_subdirs. We need to assign it an appropriate 1065 - * directory offset so we can behave when holding 1066 - * D_COMPLETE. 1068 + * directory offset so we can behave when dir is 1069 + * complete. 1067 1070 */ 1068 1071 ceph_set_dentry_offset(req->r_old_dentry); 1069 1072 dout("dn %p gets new offset %lld\n", req->r_old_dentry, ··· 1454 1457 1455 1458 1456 1459 /* 1457 - * called by trunc_wq; take i_mutex ourselves 1460 + * called by trunc_wq; 1458 1461 * 1459 1462 * We also truncate in a separate thread as well. 1460 1463 */ ··· 1465 1468 struct inode *inode = &ci->vfs_inode; 1466 1469 1467 1470 dout("vmtruncate_work %p\n", inode); 1468 - mutex_lock(&inode->i_mutex); 1469 - __ceph_do_pending_vmtruncate(inode); 1470 - mutex_unlock(&inode->i_mutex); 1471 + __ceph_do_pending_vmtruncate(inode, true); 1471 1472 iput(inode); 1472 1473 } 1473 1474 ··· 1489 1494 } 1490 1495 1491 1496 /* 1492 - * called with i_mutex held. 1493 - * 1494 1497 * Make sure any pending truncation is applied before doing anything 1495 1498 * that may depend on it. 
1496 1499 */ 1497 - void __ceph_do_pending_vmtruncate(struct inode *inode) 1500 + void __ceph_do_pending_vmtruncate(struct inode *inode, bool needlock) 1498 1501 { 1499 1502 struct ceph_inode_info *ci = ceph_inode(inode); 1500 1503 u64 to; ··· 1525 1532 ci->i_truncate_pending, to); 1526 1533 spin_unlock(&ci->i_ceph_lock); 1527 1534 1535 + if (needlock) 1536 + mutex_lock(&inode->i_mutex); 1528 1537 truncate_inode_pages(inode->i_mapping, to); 1538 + if (needlock) 1539 + mutex_unlock(&inode->i_mutex); 1529 1540 1530 1541 spin_lock(&ci->i_ceph_lock); 1531 1542 if (to == ci->i_truncate_size) { ··· 1560 1563 static const struct inode_operations ceph_symlink_iops = { 1561 1564 .readlink = generic_readlink, 1562 1565 .follow_link = ceph_sym_follow_link, 1566 + .setattr = ceph_setattr, 1567 + .getattr = ceph_getattr, 1568 + .setxattr = ceph_setxattr, 1569 + .getxattr = ceph_getxattr, 1570 + .listxattr = ceph_listxattr, 1571 + .removexattr = ceph_removexattr, 1563 1572 }; 1564 1573 1565 1574 /* ··· 1588 1585 if (ceph_snap(inode) != CEPH_NOSNAP) 1589 1586 return -EROFS; 1590 1587 1591 - __ceph_do_pending_vmtruncate(inode); 1588 + __ceph_do_pending_vmtruncate(inode, false); 1592 1589 1593 1590 err = inode_change_ok(inode, attr); 1594 1591 if (err != 0) ··· 1770 1767 ceph_cap_string(dirtied), mask); 1771 1768 1772 1769 ceph_mdsc_put_request(req); 1773 - __ceph_do_pending_vmtruncate(inode); 1770 + __ceph_do_pending_vmtruncate(inode, false); 1774 1771 return err; 1775 1772 out: 1776 1773 spin_unlock(&ci->i_ceph_lock);
+3 -2
fs/ceph/ioctl.c
··· 208 208 209 209 snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx", 210 210 ceph_ino(inode), dl.object_no); 211 - ceph_calc_object_layout(&pgid, dl.object_name, &ci->i_layout, 212 - osdc->osdmap); 211 + 212 + ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap, 213 + ceph_file_layout_pg_pool(ci->i_layout)); 213 214 214 215 dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid); 215 216 if (dl.osd >= 0) {
+55 -24
fs/ceph/mds_client.c
··· 265 265 { 266 266 if (info->head->op == CEPH_MDS_OP_GETFILELOCK) 267 267 return parse_reply_info_filelock(p, end, info, features); 268 - else if (info->head->op == CEPH_MDS_OP_READDIR) 268 + else if (info->head->op == CEPH_MDS_OP_READDIR || 269 + info->head->op == CEPH_MDS_OP_LSSNAP) 269 270 return parse_reply_info_dir(p, end, info, features); 270 271 else if (info->head->op == CEPH_MDS_OP_CREATE) 271 272 return parse_reply_info_create(p, end, info, features); ··· 365 364 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 366 365 if (atomic_dec_and_test(&s->s_ref)) { 367 366 if (s->s_auth.authorizer) 368 - s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer( 369 - s->s_mdsc->fsc->client->monc.auth, 370 - s->s_auth.authorizer); 367 + ceph_auth_destroy_authorizer( 368 + s->s_mdsc->fsc->client->monc.auth, 369 + s->s_auth.authorizer); 371 370 kfree(s); 372 371 } 373 372 } ··· 1197 1196 session->s_trim_caps--; 1198 1197 if (oissued) { 1199 1198 /* we aren't the only cap.. just remove us */ 1199 + __queue_cap_release(session, ceph_ino(inode), cap->cap_id, 1200 + cap->mseq, cap->issue_seq); 1200 1201 __ceph_remove_cap(cap); 1201 1202 } else { 1202 1203 /* try to drop referring dentries */ ··· 1721 1718 msg->front.iov_len = p - msg->front.iov_base; 1722 1719 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1723 1720 1724 - msg->pages = req->r_pages; 1725 - msg->nr_pages = req->r_num_pages; 1721 + if (req->r_data_len) { 1722 + /* outbound data set only by ceph_sync_setxattr() */ 1723 + BUG_ON(!req->r_pages); 1724 + ceph_msg_data_add_pages(msg, req->r_pages, req->r_data_len, 0); 1725 + } 1726 + 1726 1727 msg->hdr.data_len = cpu_to_le32(req->r_data_len); 1727 1728 msg->hdr.data_off = cpu_to_le16(0); 1728 1729 ··· 1920 1913 req = list_entry(tmp_list.next, 1921 1914 struct ceph_mds_request, r_wait); 1922 1915 list_del_init(&req->r_wait); 1916 + dout(" wake request %p tid %llu\n", req, req->r_tid); 1923 1917 __do_request(mdsc, req); 1924 1918 } 1925 1919 } ··· 
2034 2026 } 2035 2027 2036 2028 /* 2037 - * Invalidate dir D_COMPLETE, dentry lease state on an aborted MDS 2029 + * Invalidate dir's completeness, dentry lease state on an aborted MDS 2038 2030 * namespace request. 2039 2031 */ 2040 2032 void ceph_invalidate_dir_request(struct ceph_mds_request *req) 2041 2033 { 2042 2034 struct inode *inode = req->r_locked_dir; 2043 - struct ceph_inode_info *ci = ceph_inode(inode); 2044 2035 2045 - dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode); 2046 - spin_lock(&ci->i_ceph_lock); 2036 + dout("invalidate_dir_request %p (complete, lease(s))\n", inode); 2037 + 2047 2038 ceph_dir_clear_complete(inode); 2048 - ci->i_release_count++; 2049 - spin_unlock(&ci->i_ceph_lock); 2050 - 2051 2039 if (req->r_dentry) 2052 2040 ceph_invalidate_dentry_lease(req->r_dentry); 2053 2041 if (req->r_old_dentry) ··· 2603 2599 goto fail; 2604 2600 } 2605 2601 2606 - reply->pagelist = pagelist; 2607 2602 if (recon_state.flock) 2608 2603 reply->hdr.version = cpu_to_le16(2); 2609 - reply->hdr.data_len = cpu_to_le32(pagelist->length); 2610 - reply->nr_pages = calc_pages_for(0, pagelist->length); 2604 + if (pagelist->length) { 2605 + /* set up outbound data if we have any */ 2606 + reply->hdr.data_len = cpu_to_le32(pagelist->length); 2607 + ceph_msg_data_add_pagelist(reply, pagelist); 2608 + } 2611 2609 ceph_con_send(&session->s_con, reply); 2612 2610 2613 2611 mutex_unlock(&session->s_mutex); ··· 3439 3433 struct ceph_auth_handshake *auth = &s->s_auth; 3440 3434 3441 3435 if (force_new && auth->authorizer) { 3442 - if (ac->ops && ac->ops->destroy_authorizer) 3443 - ac->ops->destroy_authorizer(ac, auth->authorizer); 3436 + ceph_auth_destroy_authorizer(ac, auth->authorizer); 3444 3437 auth->authorizer = NULL; 3445 3438 } 3446 - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { 3447 - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, 3448 - auth); 3439 + if (!auth->authorizer) { 3440 + int ret = 
ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, 3441 + auth); 3442 + if (ret) 3443 + return ERR_PTR(ret); 3444 + } else { 3445 + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, 3446 + auth); 3449 3447 if (ret) 3450 3448 return ERR_PTR(ret); 3451 3449 } ··· 3465 3455 struct ceph_mds_client *mdsc = s->s_mdsc; 3466 3456 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; 3467 3457 3468 - return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len); 3458 + return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len); 3469 3459 } 3470 3460 3471 3461 static int invalidate_authorizer(struct ceph_connection *con) ··· 3474 3464 struct ceph_mds_client *mdsc = s->s_mdsc; 3475 3465 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; 3476 3466 3477 - if (ac->ops->invalidate_authorizer) 3478 - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); 3467 + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); 3479 3468 3480 3469 return ceph_monc_validate_auth(&mdsc->fsc->client->monc); 3470 + } 3471 + 3472 + static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, 3473 + struct ceph_msg_header *hdr, int *skip) 3474 + { 3475 + struct ceph_msg *msg; 3476 + int type = (int) le16_to_cpu(hdr->type); 3477 + int front_len = (int) le32_to_cpu(hdr->front_len); 3478 + 3479 + if (con->in_msg) 3480 + return con->in_msg; 3481 + 3482 + *skip = 0; 3483 + msg = ceph_msg_new(type, front_len, GFP_NOFS, false); 3484 + if (!msg) { 3485 + pr_err("unable to allocate msg type %d len %d\n", 3486 + type, front_len); 3487 + return NULL; 3488 + } 3489 + 3490 + return msg; 3481 3491 } 3482 3492 3483 3493 static const struct ceph_connection_operations mds_con_ops = { ··· 3508 3478 .verify_authorizer_reply = verify_authorizer_reply, 3509 3479 .invalidate_authorizer = invalidate_authorizer, 3510 3480 .peer_reset = peer_reset, 3481 + .alloc_msg = mds_alloc_msg, 3511 3482 }; 3512 3483 3513 3484 /* eof */
+5 -3
fs/ceph/mdsmap.c
··· 20 20 { 21 21 int n = 0; 22 22 int i; 23 - char r; 23 + 24 + /* special case for one mds */ 25 + if (1 == m->m_max_mds && m->m_info[0].state > 0) 26 + return 0; 24 27 25 28 /* count */ 26 29 for (i = 0; i < m->m_max_mds; i++) ··· 33 30 return -1; 34 31 35 32 /* pick */ 36 - get_random_bytes(&r, 1); 37 - n = r % n; 33 + n = prandom_u32() % n; 38 34 i = 0; 39 35 for (i = 0; n > 0; i++, n--) 40 36 while (m->m_info[i].state <= 0)
+1 -2
fs/ceph/snap.c
··· 332 332 err = -ENOMEM; 333 333 if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) 334 334 goto fail; 335 - snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS); 335 + snapc = ceph_create_snap_context(num, GFP_NOFS); 336 336 if (!snapc) 337 337 goto fail; 338 - atomic_set(&snapc->nref, 1); 339 338 340 339 /* build (reverse sorted) snap vector */ 341 340 num = 0;
+5 -2
fs/ceph/super.c
··· 479 479 CEPH_FEATURE_FLOCK | 480 480 CEPH_FEATURE_DIRLAYOUTHASH; 481 481 const unsigned required_features = 0; 482 + int page_count; 483 + size_t size; 482 484 int err = -ENOMEM; 483 485 484 486 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); ··· 524 522 525 523 /* set up mempools */ 526 524 err = -ENOMEM; 527 - fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, 528 - fsc->mount_options->wsize >> PAGE_CACHE_SHIFT); 525 + page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; 526 + size = sizeof (struct page *) * (page_count ? page_count : 1); 527 + fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); 529 528 if (!fsc->wb_pagevec_pool) 530 529 goto fail_trunc_wq; 531 530
+24 -43
fs/ceph/super.h
··· 204 204 * Ceph dentry state 205 205 */ 206 206 struct ceph_dentry_info { 207 - unsigned long flags; 208 207 struct ceph_mds_session *lease_session; 209 208 u32 lease_gen, lease_shared_gen; 210 209 u32 lease_seq; ··· 213 214 u64 time; 214 215 u64 offset; 215 216 }; 216 - 217 - /* 218 - * dentry flags 219 - * 220 - * The locking for D_COMPLETE is a bit odd: 221 - * - we can clear it at almost any time (see ceph_d_prune) 222 - * - it is only meaningful if: 223 - * - we hold dir inode i_ceph_lock 224 - * - we hold dir FILE_SHARED caps 225 - * - the dentry D_COMPLETE is set 226 - */ 227 - #define CEPH_D_COMPLETE 1 /* if set, d_u.d_subdirs is complete directory */ 228 217 229 218 struct ceph_inode_xattrs_info { 230 219 /* ··· 244 257 u32 i_time_warp_seq; 245 258 246 259 unsigned i_ceph_flags; 247 - unsigned long i_release_count; 260 + atomic_t i_release_count; 261 + atomic_t i_complete_count; 248 262 249 263 struct ceph_dir_layout i_dir_layout; 250 264 struct ceph_file_layout i_layout; ··· 255 267 struct timespec i_rctime; 256 268 u64 i_rbytes, i_rfiles, i_rsubdirs; 257 269 u64 i_files, i_subdirs; 258 - u64 i_max_offset; /* largest readdir offset, set with D_COMPLETE */ 270 + u64 i_max_offset; /* largest readdir offset, set with complete dir */ 259 271 260 272 struct rb_root i_fragtree; 261 273 struct mutex i_fragtree_mutex; ··· 424 436 #define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ 425 437 #define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ 426 438 427 - static inline void ceph_i_clear(struct inode *inode, unsigned mask) 439 + static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci, 440 + int release_count) 428 441 { 429 - struct ceph_inode_info *ci = ceph_inode(inode); 430 - 431 - spin_lock(&ci->i_ceph_lock); 432 - ci->i_ceph_flags &= ~mask; 433 - spin_unlock(&ci->i_ceph_lock); 442 + atomic_set(&ci->i_complete_count, release_count); 434 443 } 435 444 436 - static inline void ceph_i_set(struct inode *inode, unsigned mask) 445 + 
static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci) 437 446 { 438 - struct ceph_inode_info *ci = ceph_inode(inode); 439 - 440 - spin_lock(&ci->i_ceph_lock); 441 - ci->i_ceph_flags |= mask; 442 - spin_unlock(&ci->i_ceph_lock); 447 + atomic_inc(&ci->i_release_count); 443 448 } 444 449 445 - static inline bool ceph_i_test(struct inode *inode, unsigned mask) 450 + static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci) 446 451 { 447 - struct ceph_inode_info *ci = ceph_inode(inode); 448 - bool r; 452 + return atomic_read(&ci->i_complete_count) == 453 + atomic_read(&ci->i_release_count); 454 + } 449 455 450 - spin_lock(&ci->i_ceph_lock); 451 - r = (ci->i_ceph_flags & mask) == mask; 452 - spin_unlock(&ci->i_ceph_lock); 453 - return r; 456 + static inline void ceph_dir_clear_complete(struct inode *inode) 457 + { 458 + __ceph_dir_clear_complete(ceph_inode(inode)); 459 + } 460 + 461 + static inline bool ceph_dir_is_complete(struct inode *inode) 462 + { 463 + return __ceph_dir_is_complete(ceph_inode(inode)); 454 464 } 455 465 456 466 ··· 473 487 { 474 488 return ((loff_t)frag << 32) | (loff_t)off; 475 489 } 476 - 477 - /* 478 - * set/clear directory D_COMPLETE flag 479 - */ 480 - void ceph_dir_set_complete(struct inode *inode); 481 - void ceph_dir_clear_complete(struct inode *inode); 482 - bool ceph_dir_test_complete(struct inode *inode); 483 490 484 491 /* 485 492 * caps helpers ··· 563 584 u64 next_offset; /* offset of next chunk (last_name's + 1) */ 564 585 char *last_name; /* last entry in previous chunk */ 565 586 struct dentry *dentry; /* next dentry (for dcache readdir) */ 566 - unsigned long dir_release_count; 587 + int dir_release_count; 567 588 568 589 /* used for -o dirstat read() on directory thing */ 569 590 char *dir_info; ··· 692 713 extern int ceph_inode_holds_cap(struct inode *inode, int mask); 693 714 694 715 extern int ceph_inode_set_size(struct inode *inode, loff_t size); 695 - extern void 
__ceph_do_pending_vmtruncate(struct inode *inode); 716 + extern void __ceph_do_pending_vmtruncate(struct inode *inode, bool needlock); 696 717 extern void ceph_queue_vmtruncate(struct inode *inode); 697 718 698 719 extern void ceph_queue_invalidate(struct inode *inode); ··· 734 755 extern void ceph_put_cap(struct ceph_mds_client *mdsc, 735 756 struct ceph_cap *cap); 736 757 758 + extern void __queue_cap_release(struct ceph_mds_session *session, u64 ino, 759 + u64 cap_id, u32 migrate_seq, u32 issue_seq); 737 760 extern void ceph_queue_caps_release(struct inode *inode); 738 761 extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); 739 762 extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
+18
include/linux/ceph/auth.h
··· 52 52 */ 53 53 int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, 54 54 struct ceph_auth_handshake *auth); 55 + /* ensure that an existing authorizer is up to date */ 56 + int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, 57 + struct ceph_auth_handshake *auth); 55 58 int (*verify_authorizer_reply)(struct ceph_auth_client *ac, 56 59 struct ceph_authorizer *a, size_t len); 57 60 void (*destroy_authorizer)(struct ceph_auth_client *ac, ··· 78 75 u64 global_id; /* our unique id in system */ 79 76 const struct ceph_crypto_key *key; /* our secret key */ 80 77 unsigned want_keys; /* which services we want */ 78 + 79 + struct mutex mutex; 81 80 }; 82 81 83 82 extern struct ceph_auth_client *ceph_auth_init(const char *name, ··· 99 94 void *msg_buf, size_t msg_len); 100 95 101 96 extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); 97 + extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 98 + int peer_type, 99 + struct ceph_auth_handshake *auth); 100 + extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 101 + struct ceph_authorizer *a); 102 + extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 103 + int peer_type, 104 + struct ceph_auth_handshake *a); 105 + extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, 106 + struct ceph_authorizer *a, 107 + size_t len); 108 + extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, 109 + int peer_type); 102 110 103 111 #endif
+2
include/linux/ceph/ceph_features.h
··· 41 41 */ 42 42 #define CEPH_FEATURES_SUPPORTED_DEFAULT \ 43 43 (CEPH_FEATURE_NOSRCADDR | \ 44 + CEPH_FEATURE_RECONNECT_SEQ | \ 44 45 CEPH_FEATURE_PGID64 | \ 45 46 CEPH_FEATURE_PGPOOL3 | \ 46 47 CEPH_FEATURE_OSDENC | \ ··· 52 51 53 52 #define CEPH_FEATURES_REQUIRED_DEFAULT \ 54 53 (CEPH_FEATURE_NOSRCADDR | \ 54 + CEPH_FEATURE_RECONNECT_SEQ | \ 55 55 CEPH_FEATURE_PGID64 | \ 56 56 CEPH_FEATURE_PGPOOL3 | \ 57 57 CEPH_FEATURE_OSDENC)
+26 -4
include/linux/ceph/decode.h
··· 8 8 9 9 #include <linux/ceph/types.h> 10 10 11 + /* This seemed to be the easiest place to define these */ 12 + 13 + #define U8_MAX ((u8)(~0U)) 14 + #define U16_MAX ((u16)(~0U)) 15 + #define U32_MAX ((u32)(~0U)) 16 + #define U64_MAX ((u64)(~0ULL)) 17 + 18 + #define S8_MAX ((s8)(U8_MAX >> 1)) 19 + #define S16_MAX ((s16)(U16_MAX >> 1)) 20 + #define S32_MAX ((s32)(U32_MAX >> 1)) 21 + #define S64_MAX ((s64)(U64_MAX >> 1LL)) 22 + 23 + #define S8_MIN ((s8)(-S8_MAX - 1)) 24 + #define S16_MIN ((s16)(-S16_MAX - 1)) 25 + #define S32_MIN ((s32)(-S32_MAX - 1)) 26 + #define S64_MIN ((s64)(-S64_MAX - 1LL)) 27 + 11 28 /* 12 29 * in all cases, 13 30 * void **p pointer to position pointer ··· 154 137 static inline void ceph_decode_timespec(struct timespec *ts, 155 138 const struct ceph_timespec *tv) 156 139 { 157 - ts->tv_sec = le32_to_cpu(tv->tv_sec); 158 - ts->tv_nsec = le32_to_cpu(tv->tv_nsec); 140 + ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec); 141 + ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec); 159 142 } 160 143 static inline void ceph_encode_timespec(struct ceph_timespec *tv, 161 144 const struct timespec *ts) 162 145 { 163 - tv->tv_sec = cpu_to_le32(ts->tv_sec); 164 - tv->tv_nsec = cpu_to_le32(ts->tv_nsec); 146 + BUG_ON(ts->tv_sec < 0); 147 + BUG_ON(ts->tv_sec > (__kernel_time_t)U32_MAX); 148 + BUG_ON(ts->tv_nsec < 0); 149 + BUG_ON(ts->tv_nsec > (long)U32_MAX); 150 + 151 + tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); 152 + tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); 165 153 } 166 154 167 155 /*
+6 -25
include/linux/ceph/libceph.h
··· 66 66 #define CEPH_OSD_IDLE_TTL_DEFAULT 60 67 67 68 68 #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) 69 + #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) 69 70 #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) 70 71 71 72 #define CEPH_AUTH_NAME_DEFAULT "guest" ··· 157 156 u64 snaps[]; 158 157 }; 159 158 160 - static inline struct ceph_snap_context * 161 - ceph_get_snap_context(struct ceph_snap_context *sc) 162 - { 163 - /* 164 - printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), 165 - atomic_read(&sc->nref)+1); 166 - */ 167 - if (sc) 168 - atomic_inc(&sc->nref); 169 - return sc; 170 - } 171 - 172 - static inline void ceph_put_snap_context(struct ceph_snap_context *sc) 173 - { 174 - if (!sc) 175 - return; 176 - /* 177 - printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), 178 - atomic_read(&sc->nref)-1); 179 - */ 180 - if (atomic_dec_and_test(&sc->nref)) { 181 - /*printk(" deleting snap_context %p\n", sc);*/ 182 - kfree(sc); 183 - } 184 - } 159 + extern struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, 160 + gfp_t gfp_flags); 161 + extern struct ceph_snap_context *ceph_get_snap_context( 162 + struct ceph_snap_context *sc); 163 + extern void ceph_put_snap_context(struct ceph_snap_context *sc); 185 164 186 165 /* 187 166 * calculate the number of pages a given length and offset map onto,
+85 -19
include/linux/ceph/messenger.h
··· 64 64 u32 required_features; 65 65 }; 66 66 67 + enum ceph_msg_data_type { 68 + CEPH_MSG_DATA_NONE, /* message contains no data payload */ 69 + CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */ 70 + CEPH_MSG_DATA_PAGELIST, /* data source/destination is a pagelist */ 71 + #ifdef CONFIG_BLOCK 72 + CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ 73 + #endif /* CONFIG_BLOCK */ 74 + }; 75 + 76 + static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) 77 + { 78 + switch (type) { 79 + case CEPH_MSG_DATA_NONE: 80 + case CEPH_MSG_DATA_PAGES: 81 + case CEPH_MSG_DATA_PAGELIST: 82 + #ifdef CONFIG_BLOCK 83 + case CEPH_MSG_DATA_BIO: 84 + #endif /* CONFIG_BLOCK */ 85 + return true; 86 + default: 87 + return false; 88 + } 89 + } 90 + 91 + struct ceph_msg_data { 92 + struct list_head links; /* ceph_msg->data */ 93 + enum ceph_msg_data_type type; 94 + union { 95 + #ifdef CONFIG_BLOCK 96 + struct { 97 + struct bio *bio; 98 + size_t bio_length; 99 + }; 100 + #endif /* CONFIG_BLOCK */ 101 + struct { 102 + struct page **pages; /* NOT OWNER. 
*/ 103 + size_t length; /* total # bytes */ 104 + unsigned int alignment; /* first page */ 105 + }; 106 + struct ceph_pagelist *pagelist; 107 + }; 108 + }; 109 + 110 + struct ceph_msg_data_cursor { 111 + size_t total_resid; /* across all data items */ 112 + struct list_head *data_head; /* = &ceph_msg->data */ 113 + 114 + struct ceph_msg_data *data; /* current data item */ 115 + size_t resid; /* bytes not yet consumed */ 116 + bool last_piece; /* current is last piece */ 117 + bool need_crc; /* crc update needed */ 118 + union { 119 + #ifdef CONFIG_BLOCK 120 + struct { /* bio */ 121 + struct bio *bio; /* bio from list */ 122 + unsigned int vector_index; /* vector from bio */ 123 + unsigned int vector_offset; /* bytes from vector */ 124 + }; 125 + #endif /* CONFIG_BLOCK */ 126 + struct { /* pages */ 127 + unsigned int page_offset; /* offset in page */ 128 + unsigned short page_index; /* index in array */ 129 + unsigned short page_count; /* pages in array */ 130 + }; 131 + struct { /* pagelist */ 132 + struct page *page; /* page from list */ 133 + size_t offset; /* bytes from list */ 134 + }; 135 + }; 136 + }; 137 + 67 138 /* 68 139 * a single message. it contains a header (src, dest, message type, etc.), 69 140 * footer (crc values, mainly), a "front" message body, and possibly a ··· 145 74 struct ceph_msg_footer footer; /* footer */ 146 75 struct kvec front; /* unaligned blobs of message */ 147 76 struct ceph_buffer *middle; 148 - struct page **pages; /* data payload. NOT OWNER. 
*/ 149 - unsigned nr_pages; /* size of page array */ 150 - unsigned page_alignment; /* io offset in first page */ 151 - struct ceph_pagelist *pagelist; /* instead of pages */ 77 + 78 + size_t data_length; 79 + struct list_head data; 80 + struct ceph_msg_data_cursor cursor; 152 81 153 82 struct ceph_connection *con; 154 - struct list_head list_head; 83 + struct list_head list_head; /* links for connection lists */ 155 84 156 85 struct kref kref; 157 - #ifdef CONFIG_BLOCK 158 - struct bio *bio; /* instead of pages/pagelist */ 159 - struct bio *bio_iter; /* bio iterator */ 160 - int bio_seg; /* current bio segment */ 161 - #endif /* CONFIG_BLOCK */ 162 - struct ceph_pagelist *trail; /* the trailing part of the data */ 163 86 bool front_is_vmalloc; 164 87 bool more_to_follow; 165 88 bool needs_out_seq; ··· 161 96 unsigned long ack_stamp; /* tx: when we were acked */ 162 97 163 98 struct ceph_msgpool *pool; 164 - }; 165 - 166 - struct ceph_msg_pos { 167 - int page, page_pos; /* which page; offset in page */ 168 - int data_pos; /* offset in data payload */ 169 - bool did_page_crc; /* true if we've calculated crc for current page */ 170 99 }; 171 100 172 101 /* ceph connection fault delay defaults, for exponential backoff */ ··· 220 161 struct ceph_msg *out_msg; /* sending message (== tail of 221 162 out_sent) */ 222 163 bool out_msg_done; 223 - struct ceph_msg_pos out_msg_pos; 224 164 225 165 struct kvec out_kvec[8], /* sending header/footer data */ 226 166 *out_kvec_cur; ··· 233 175 /* message in temps */ 234 176 struct ceph_msg_header in_hdr; 235 177 struct ceph_msg *in_msg; 236 - struct ceph_msg_pos in_msg_pos; 237 178 u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ 238 179 239 180 char in_tag; /* protocol control byte */ ··· 274 217 extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); 275 218 276 219 extern void ceph_con_keepalive(struct ceph_connection *con); 220 + 221 + extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page 
**pages, 222 + size_t length, size_t alignment); 223 + extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 224 + struct ceph_pagelist *pagelist); 225 + #ifdef CONFIG_BLOCK 226 + extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, 227 + size_t length); 228 + #endif /* CONFIG_BLOCK */ 277 229 278 230 extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 279 231 bool can_fail);
+1
include/linux/ceph/msgr.h
··· 87 87 #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ 88 88 #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ 89 89 #define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ 90 + #define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ 90 91 91 92 92 93 /*
+135 -69
include/linux/ceph/osd_client.h
··· 29 29 */ 30 30 typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, 31 31 struct ceph_msg *); 32 + typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); 32 33 33 34 /* a given osd we're communicating with */ 34 35 struct ceph_osd { ··· 49 48 }; 50 49 51 50 52 - #define CEPH_OSD_MAX_OP 10 51 + #define CEPH_OSD_MAX_OP 2 52 + 53 + enum ceph_osd_data_type { 54 + CEPH_OSD_DATA_TYPE_NONE = 0, 55 + CEPH_OSD_DATA_TYPE_PAGES, 56 + CEPH_OSD_DATA_TYPE_PAGELIST, 57 + #ifdef CONFIG_BLOCK 58 + CEPH_OSD_DATA_TYPE_BIO, 59 + #endif /* CONFIG_BLOCK */ 60 + }; 61 + 62 + struct ceph_osd_data { 63 + enum ceph_osd_data_type type; 64 + union { 65 + struct { 66 + struct page **pages; 67 + u64 length; 68 + u32 alignment; 69 + bool pages_from_pool; 70 + bool own_pages; 71 + }; 72 + struct ceph_pagelist *pagelist; 73 + #ifdef CONFIG_BLOCK 74 + struct { 75 + struct bio *bio; /* list of bios */ 76 + size_t bio_length; /* total in list */ 77 + }; 78 + #endif /* CONFIG_BLOCK */ 79 + }; 80 + }; 81 + 82 + struct ceph_osd_req_op { 83 + u16 op; /* CEPH_OSD_OP_* */ 84 + u32 payload_len; 85 + union { 86 + struct ceph_osd_data raw_data_in; 87 + struct { 88 + u64 offset, length; 89 + u64 truncate_size; 90 + u32 truncate_seq; 91 + struct ceph_osd_data osd_data; 92 + } extent; 93 + struct { 94 + const char *class_name; 95 + const char *method_name; 96 + struct ceph_osd_data request_info; 97 + struct ceph_osd_data request_data; 98 + struct ceph_osd_data response_data; 99 + __u8 class_len; 100 + __u8 method_len; 101 + __u8 argc; 102 + } cls; 103 + struct { 104 + u64 cookie; 105 + u64 ver; 106 + u32 prot_ver; 107 + u32 timeout; 108 + __u8 flag; 109 + } watch; 110 + }; 111 + }; 53 112 54 113 /* an in-flight request */ 55 114 struct ceph_osd_request { ··· 124 63 int r_pg_osds[CEPH_PG_MAX_SIZE]; 125 64 int r_num_pg_osds; 126 65 127 - struct ceph_connection *r_con_filling_msg; 128 - 129 66 struct ceph_msg *r_request, *r_reply; 130 67 int r_flags; /* any additional flags for 
the osd */ 131 68 u32 r_sent; /* >0 if r_request is sending/sent */ 132 - int r_num_ops; 133 69 134 - /* encoded message content */ 135 - struct ceph_osd_op *r_request_ops; 70 + /* request osd ops array */ 71 + unsigned int r_num_ops; 72 + struct ceph_osd_req_op r_ops[CEPH_OSD_MAX_OP]; 73 + 136 74 /* these are updated on each send */ 137 75 __le32 *r_request_osdmap_epoch; 138 76 __le32 *r_request_flags; ··· 145 85 s32 r_reply_op_result[CEPH_OSD_MAX_OP]; 146 86 int r_got_reply; 147 87 int r_linger; 88 + int r_completed; 148 89 149 90 struct ceph_osd_client *r_osdc; 150 91 struct kref r_kref; 151 92 bool r_mempool; 152 93 struct completion r_completion, r_safe_completion; 153 - ceph_osdc_callback_t r_callback, r_safe_callback; 94 + ceph_osdc_callback_t r_callback; 95 + ceph_osdc_unsafe_callback_t r_unsafe_callback; 154 96 struct ceph_eversion r_reassert_version; 155 97 struct list_head r_unsafe_item; 156 98 ··· 166 104 167 105 struct ceph_file_layout r_file_layout; 168 106 struct ceph_snap_context *r_snapc; /* snap context for writes */ 169 - unsigned r_num_pages; /* size of page array (follows) */ 170 - unsigned r_page_alignment; /* io offset in first page */ 171 - struct page **r_pages; /* pages for data payload */ 172 - int r_pages_from_pool; 173 - int r_own_pages; /* if true, i own page list */ 174 - #ifdef CONFIG_BLOCK 175 - struct bio *r_bio; /* instead of pages */ 176 - #endif 177 - 178 - struct ceph_pagelist r_trail; /* trailing part of the data */ 179 107 }; 180 108 181 109 struct ceph_osd_event { ··· 224 172 struct workqueue_struct *notify_wq; 225 173 }; 226 174 227 - struct ceph_osd_req_op { 228 - u16 op; /* CEPH_OSD_OP_* */ 229 - u32 payload_len; 230 - union { 231 - struct { 232 - u64 offset, length; 233 - u64 truncate_size; 234 - u32 truncate_seq; 235 - } extent; 236 - struct { 237 - const char *name; 238 - const char *val; 239 - u32 name_len; 240 - u32 value_len; 241 - __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ 242 - __u8 cmp_mode; /* 
CEPH_OSD_CMPXATTR_MODE_* */ 243 - } xattr; 244 - struct { 245 - const char *class_name; 246 - const char *method_name; 247 - const char *indata; 248 - u32 indata_len; 249 - __u8 class_len; 250 - __u8 method_len; 251 - __u8 argc; 252 - } cls; 253 - struct { 254 - u64 cookie; 255 - u64 count; 256 - } pgls; 257 - struct { 258 - u64 snapid; 259 - } snap; 260 - struct { 261 - u64 cookie; 262 - u64 ver; 263 - u32 prot_ver; 264 - u32 timeout; 265 - __u8 flag; 266 - } watch; 267 - }; 268 - }; 175 + extern int ceph_osdc_setup(void); 176 + extern void ceph_osdc_cleanup(void); 269 177 270 178 extern int ceph_osdc_init(struct ceph_osd_client *osdc, 271 179 struct ceph_client *client); ··· 236 224 extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, 237 225 struct ceph_msg *msg); 238 226 227 + extern void osd_req_op_init(struct ceph_osd_request *osd_req, 228 + unsigned int which, u16 opcode); 229 + 230 + extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, 231 + unsigned int which, 232 + struct page **pages, u64 length, 233 + u32 alignment, bool pages_from_pool, 234 + bool own_pages); 235 + 236 + extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, 237 + unsigned int which, u16 opcode, 238 + u64 offset, u64 length, 239 + u64 truncate_size, u32 truncate_seq); 240 + extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, 241 + unsigned int which, u64 length); 242 + 243 + extern struct ceph_osd_data *osd_req_op_extent_osd_data( 244 + struct ceph_osd_request *osd_req, 245 + unsigned int which); 246 + extern struct ceph_osd_data *osd_req_op_cls_response_data( 247 + struct ceph_osd_request *osd_req, 248 + unsigned int which); 249 + 250 + extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *, 251 + unsigned int which, 252 + struct page **pages, u64 length, 253 + u32 alignment, bool pages_from_pool, 254 + bool own_pages); 255 + extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, 256 + 
unsigned int which, 257 + struct ceph_pagelist *pagelist); 258 + #ifdef CONFIG_BLOCK 259 + extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, 260 + unsigned int which, 261 + struct bio *bio, size_t bio_length); 262 + #endif /* CONFIG_BLOCK */ 263 + 264 + extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, 265 + unsigned int which, 266 + struct ceph_pagelist *pagelist); 267 + extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, 268 + unsigned int which, 269 + struct page **pages, u64 length, 270 + u32 alignment, bool pages_from_pool, 271 + bool own_pages); 272 + extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, 273 + unsigned int which, 274 + struct page **pages, u64 length, 275 + u32 alignment, bool pages_from_pool, 276 + bool own_pages); 277 + 278 + extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, 279 + unsigned int which, u16 opcode, 280 + const char *class, const char *method); 281 + extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, 282 + unsigned int which, u16 opcode, 283 + u64 cookie, u64 version, int flag); 284 + 239 285 extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, 240 286 struct ceph_snap_context *snapc, 241 - unsigned int num_op, 287 + unsigned int num_ops, 242 288 bool use_mempool, 243 289 gfp_t gfp_flags); 244 290 245 - extern void ceph_osdc_build_request(struct ceph_osd_request *req, 246 - u64 off, u64 len, 247 - unsigned int num_op, 248 - struct ceph_osd_req_op *src_ops, 291 + extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, 249 292 struct ceph_snap_context *snapc, 250 293 u64 snap_id, 251 294 struct timespec *mtime); ··· 308 241 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, 309 242 struct ceph_file_layout *layout, 310 243 struct ceph_vino vino, 311 - u64 offset, u64 *len, int op, int flags, 244 + u64 offset, u64 *len, 245 + int 
num_ops, int opcode, int flags, 312 246 struct ceph_snap_context *snapc, 313 - int do_sync, u32 truncate_seq, 314 - u64 truncate_size, 315 - struct timespec *mtime, 316 - bool use_mempool, int page_align); 247 + u32 truncate_seq, u64 truncate_size, 248 + bool use_mempool); 317 249 318 250 extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, 319 251 struct ceph_osd_request *req);
+26 -4
include/linux/ceph/osdmap.h
··· 3 3 4 4 #include <linux/rbtree.h> 5 5 #include <linux/ceph/types.h> 6 + #include <linux/ceph/decode.h> 6 7 #include <linux/ceph/ceph_fs.h> 7 8 #include <linux/crush/crush.h> 8 9 ··· 120 119 return &map->osd_addr[osd]; 121 120 } 122 121 122 + static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) 123 + { 124 + __u8 version; 125 + 126 + if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) { 127 + pr_warning("incomplete pg encoding"); 128 + 129 + return -EINVAL; 130 + } 131 + version = ceph_decode_8(p); 132 + if (version > 1) { 133 + pr_warning("do not understand pg encoding %d > 1", 134 + (int)version); 135 + return -EINVAL; 136 + } 137 + 138 + pgid->pool = ceph_decode_64(p); 139 + pgid->seed = ceph_decode_32(p); 140 + *p += 4; /* skip deprecated preferred value */ 141 + 142 + return 0; 143 + } 144 + 123 145 extern struct ceph_osdmap *osdmap_decode(void **p, void *end); 124 146 extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 125 147 struct ceph_osdmap *map, ··· 155 131 u64 *bno, u64 *oxoff, u64 *oxlen); 156 132 157 133 /* calculate mapping of object to a placement group */ 158 - extern int ceph_calc_object_layout(struct ceph_pg *pg, 159 - const char *oid, 160 - struct ceph_file_layout *fl, 161 - struct ceph_osdmap *osdmap); 134 + extern int ceph_calc_ceph_pg(struct ceph_pg *pg, const char *oid, 135 + struct ceph_osdmap *osdmap, uint64_t pool); 162 136 extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, 163 137 struct ceph_pg pgid, 164 138 int *acting);
+1 -1
net/ceph/Makefile
··· 11 11 crypto.o armor.o \ 12 12 auth_x.o \ 13 13 ceph_fs.o ceph_strings.o ceph_hash.o \ 14 - pagevec.o 14 + pagevec.o snapshot.o 15 15
+99 -18
net/ceph/auth.c
··· 47 47 if (!ac) 48 48 goto out; 49 49 50 + mutex_init(&ac->mutex); 50 51 ac->negotiating = true; 51 52 if (name) 52 53 ac->name = name; ··· 74 73 */ 75 74 void ceph_auth_reset(struct ceph_auth_client *ac) 76 75 { 76 + mutex_lock(&ac->mutex); 77 77 dout("auth_reset %p\n", ac); 78 78 if (ac->ops && !ac->negotiating) 79 79 ac->ops->reset(ac); 80 80 ac->negotiating = true; 81 + mutex_unlock(&ac->mutex); 81 82 } 82 83 83 84 int ceph_entity_name_encode(const char *name, void **p, void *end) ··· 105 102 int i, num; 106 103 int ret; 107 104 105 + mutex_lock(&ac->mutex); 108 106 dout("auth_build_hello\n"); 109 107 monhdr->have_version = 0; 110 108 monhdr->session_mon = cpu_to_le16(-1); ··· 126 122 127 123 ret = ceph_entity_name_encode(ac->name, &p, end); 128 124 if (ret < 0) 129 - return ret; 125 + goto out; 130 126 ceph_decode_need(&p, end, sizeof(u64), bad); 131 127 ceph_encode_64(&p, ac->global_id); 132 128 133 129 ceph_encode_32(&lenp, p - lenp - sizeof(u32)); 134 - return p - buf; 130 + ret = p - buf; 131 + out: 132 + mutex_unlock(&ac->mutex); 133 + return ret; 135 134 136 135 bad: 137 - return -ERANGE; 136 + ret = -ERANGE; 137 + goto out; 138 138 } 139 139 140 140 static int ceph_build_auth_request(struct ceph_auth_client *ac, ··· 159 151 if (ret < 0) { 160 152 pr_err("error %d building auth method %s request\n", ret, 161 153 ac->ops->name); 162 - return ret; 154 + goto out; 163 155 } 164 156 dout(" built request %d bytes\n", ret); 165 157 ceph_encode_32(&p, ret); 166 - return p + ret - msg_buf; 158 + ret = p + ret - msg_buf; 159 + out: 160 + return ret; 167 161 } 168 162 169 163 /* ··· 186 176 int result_msg_len; 187 177 int ret = -EINVAL; 188 178 179 + mutex_lock(&ac->mutex); 189 180 dout("handle_auth_reply %p %p\n", p, end); 190 181 ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad); 191 182 protocol = ceph_decode_32(&p); ··· 238 227 239 228 ret = ac->ops->handle_reply(ac, result, payload, payload_end); 240 229 if (ret == -EAGAIN) { 241 - return 
ceph_build_auth_request(ac, reply_buf, reply_len); 230 + ret = ceph_build_auth_request(ac, reply_buf, reply_len); 242 231 } else if (ret) { 243 232 pr_err("auth method '%s' error %d\n", ac->ops->name, ret); 244 - return ret; 245 233 } 246 - return 0; 234 + 235 + out: 236 + mutex_unlock(&ac->mutex); 237 + return ret; 247 238 248 239 bad: 249 240 pr_err("failed to decode auth msg\n"); 250 - out: 251 - return ret; 241 + ret = -EINVAL; 242 + goto out; 252 243 } 253 244 254 245 int ceph_build_auth(struct ceph_auth_client *ac, 255 246 void *msg_buf, size_t msg_len) 256 247 { 248 + int ret = 0; 249 + 250 + mutex_lock(&ac->mutex); 257 251 if (!ac->protocol) 258 - return ceph_auth_build_hello(ac, msg_buf, msg_len); 259 - BUG_ON(!ac->ops); 260 - if (ac->ops->should_authenticate(ac)) 261 - return ceph_build_auth_request(ac, msg_buf, msg_len); 262 - return 0; 252 + ret = ceph_auth_build_hello(ac, msg_buf, msg_len); 253 + else if (ac->ops->should_authenticate(ac)) 254 + ret = ceph_build_auth_request(ac, msg_buf, msg_len); 255 + mutex_unlock(&ac->mutex); 256 + return ret; 263 257 } 264 258 265 259 int ceph_auth_is_authenticated(struct ceph_auth_client *ac) 266 260 { 267 - if (!ac->ops) 268 - return 0; 269 - return ac->ops->is_authenticated(ac); 261 + int ret = 0; 262 + 263 + mutex_lock(&ac->mutex); 264 + if (ac->ops) 265 + ret = ac->ops->is_authenticated(ac); 266 + mutex_unlock(&ac->mutex); 267 + return ret; 270 268 } 269 + EXPORT_SYMBOL(ceph_auth_is_authenticated); 270 + 271 + int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 272 + int peer_type, 273 + struct ceph_auth_handshake *auth) 274 + { 275 + int ret = 0; 276 + 277 + mutex_lock(&ac->mutex); 278 + if (ac->ops && ac->ops->create_authorizer) 279 + ret = ac->ops->create_authorizer(ac, peer_type, auth); 280 + mutex_unlock(&ac->mutex); 281 + return ret; 282 + } 283 + EXPORT_SYMBOL(ceph_auth_create_authorizer); 284 + 285 + void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 286 + struct ceph_authorizer *a) 
287 + { 288 + mutex_lock(&ac->mutex); 289 + if (ac->ops && ac->ops->destroy_authorizer) 290 + ac->ops->destroy_authorizer(ac, a); 291 + mutex_unlock(&ac->mutex); 292 + } 293 + EXPORT_SYMBOL(ceph_auth_destroy_authorizer); 294 + 295 + int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 296 + int peer_type, 297 + struct ceph_auth_handshake *a) 298 + { 299 + int ret = 0; 300 + 301 + mutex_lock(&ac->mutex); 302 + if (ac->ops && ac->ops->update_authorizer) 303 + ret = ac->ops->update_authorizer(ac, peer_type, a); 304 + mutex_unlock(&ac->mutex); 305 + return ret; 306 + } 307 + EXPORT_SYMBOL(ceph_auth_update_authorizer); 308 + 309 + int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, 310 + struct ceph_authorizer *a, size_t len) 311 + { 312 + int ret = 0; 313 + 314 + mutex_lock(&ac->mutex); 315 + if (ac->ops && ac->ops->verify_authorizer_reply) 316 + ret = ac->ops->verify_authorizer_reply(ac, a, len); 317 + mutex_unlock(&ac->mutex); 318 + return ret; 319 + } 320 + EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply); 321 + 322 + void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) 323 + { 324 + mutex_lock(&ac->mutex); 325 + if (ac->ops && ac->ops->invalidate_authorizer) 326 + ac->ops->invalidate_authorizer(ac, peer_type); 327 + mutex_unlock(&ac->mutex); 328 + } 329 + EXPORT_SYMBOL(ceph_auth_invalidate_authorizer);
+23 -1
net/ceph/auth_x.c
··· 298 298 return -ENOMEM; 299 299 } 300 300 au->service = th->service; 301 + au->secret_id = th->secret_id; 301 302 302 303 msg_a = au->buf->vec.iov_base; 303 304 msg_a->struct_v = 1; ··· 556 555 return 0; 557 556 } 558 557 558 + static int ceph_x_update_authorizer( 559 + struct ceph_auth_client *ac, int peer_type, 560 + struct ceph_auth_handshake *auth) 561 + { 562 + struct ceph_x_authorizer *au; 563 + struct ceph_x_ticket_handler *th; 564 + 565 + th = get_ticket_handler(ac, peer_type); 566 + if (IS_ERR(th)) 567 + return PTR_ERR(th); 568 + 569 + au = (struct ceph_x_authorizer *)auth->authorizer; 570 + if (au->secret_id < th->secret_id) { 571 + dout("ceph_x_update_authorizer service %u secret %llu < %llu\n", 572 + au->service, au->secret_id, th->secret_id); 573 + return ceph_x_build_authorizer(ac, th, au); 574 + } 575 + return 0; 576 + } 577 + 559 578 static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, 560 579 struct ceph_authorizer *a, size_t len) 561 580 { ··· 651 630 652 631 th = get_ticket_handler(ac, peer_type); 653 632 if (!IS_ERR(th)) 654 - remove_ticket_handler(ac, th); 633 + memset(&th->validity, 0, sizeof(th->validity)); 655 634 } 656 635 657 636 ··· 662 641 .build_request = ceph_x_build_request, 663 642 .handle_reply = ceph_x_handle_reply, 664 643 .create_authorizer = ceph_x_create_authorizer, 644 + .update_authorizer = ceph_x_update_authorizer, 665 645 .verify_authorizer_reply = ceph_x_verify_authorizer_reply, 666 646 .destroy_authorizer = ceph_x_destroy_authorizer, 667 647 .invalidate_authorizer = ceph_x_invalidate_authorizer,
+1
net/ceph/auth_x.h
··· 29 29 struct ceph_buffer *buf; 30 30 unsigned int service; 31 31 u64 nonce; 32 + u64 secret_id; 32 33 char reply_buf[128]; /* big enough for encrypted blob */ 33 34 }; 34 35
+7
net/ceph/ceph_common.c
··· 606 606 if (ret < 0) 607 607 goto out_crypto; 608 608 609 + ret = ceph_osdc_setup(); 610 + if (ret < 0) 611 + goto out_msgr; 612 + 609 613 pr_info("loaded (mon/osd proto %d/%d)\n", 610 614 CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL); 611 615 612 616 return 0; 613 617 618 + out_msgr: 619 + ceph_msgr_exit(); 614 620 out_crypto: 615 621 ceph_crypto_shutdown(); 616 622 out_debugfs: ··· 628 622 static void __exit exit_ceph_lib(void) 629 623 { 630 624 dout("exit_ceph_lib\n"); 625 + ceph_osdc_cleanup(); 631 626 ceph_msgr_exit(); 632 627 ceph_crypto_shutdown(); 633 628 ceph_debugfs_cleanup();
+2 -2
net/ceph/debugfs.c
··· 123 123 mutex_lock(&osdc->request_mutex); 124 124 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { 125 125 struct ceph_osd_request *req; 126 + unsigned int i; 126 127 int opcode; 127 - int i; 128 128 129 129 req = rb_entry(p, struct ceph_osd_request, r_node); 130 130 ··· 142 142 seq_printf(s, "\t"); 143 143 144 144 for (i = 0; i < req->r_num_ops; i++) { 145 - opcode = le16_to_cpu(req->r_request_ops[i].op); 145 + opcode = req->r_ops[i].op; 146 146 seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); 147 147 } 148 148
+712 -319
net/ceph/messenger.c
··· 21 21 #include <linux/ceph/pagelist.h> 22 22 #include <linux/export.h> 23 23 24 + #define list_entry_next(pos, member) \ 25 + list_entry(pos->member.next, typeof(*pos), member) 26 + 24 27 /* 25 28 * Ceph uses the messenger to exchange ceph_msg messages with other 26 29 * hosts in the system. The messenger provides ordered and reliable ··· 152 149 return test_and_set_bit(con_flag, &con->flags); 153 150 } 154 151 152 + /* Slab caches for frequently-allocated structures */ 153 + 154 + static struct kmem_cache *ceph_msg_cache; 155 + static struct kmem_cache *ceph_msg_data_cache; 156 + 155 157 /* static tag bytes (protocol control messages) */ 156 158 static char tag_msg = CEPH_MSGR_TAG_MSG; 157 159 static char tag_ack = CEPH_MSGR_TAG_ACK; ··· 231 223 */ 232 224 static struct workqueue_struct *ceph_msgr_wq; 233 225 226 + static int ceph_msgr_slab_init(void) 227 + { 228 + BUG_ON(ceph_msg_cache); 229 + ceph_msg_cache = kmem_cache_create("ceph_msg", 230 + sizeof (struct ceph_msg), 231 + __alignof__(struct ceph_msg), 0, NULL); 232 + 233 + if (!ceph_msg_cache) 234 + return -ENOMEM; 235 + 236 + BUG_ON(ceph_msg_data_cache); 237 + ceph_msg_data_cache = kmem_cache_create("ceph_msg_data", 238 + sizeof (struct ceph_msg_data), 239 + __alignof__(struct ceph_msg_data), 240 + 0, NULL); 241 + if (ceph_msg_data_cache) 242 + return 0; 243 + 244 + kmem_cache_destroy(ceph_msg_cache); 245 + ceph_msg_cache = NULL; 246 + 247 + return -ENOMEM; 248 + } 249 + 250 + static void ceph_msgr_slab_exit(void) 251 + { 252 + BUG_ON(!ceph_msg_data_cache); 253 + kmem_cache_destroy(ceph_msg_data_cache); 254 + ceph_msg_data_cache = NULL; 255 + 256 + BUG_ON(!ceph_msg_cache); 257 + kmem_cache_destroy(ceph_msg_cache); 258 + ceph_msg_cache = NULL; 259 + } 260 + 234 261 static void _ceph_msgr_exit(void) 235 262 { 236 263 if (ceph_msgr_wq) { 237 264 destroy_workqueue(ceph_msgr_wq); 238 265 ceph_msgr_wq = NULL; 239 266 } 267 + 268 + ceph_msgr_slab_exit(); 240 269 241 270 BUG_ON(zero_page == NULL); 242 271 
kunmap(zero_page); ··· 286 241 BUG_ON(zero_page != NULL); 287 242 zero_page = ZERO_PAGE(0); 288 243 page_cache_get(zero_page); 244 + 245 + if (ceph_msgr_slab_init()) 246 + return -ENOMEM; 289 247 290 248 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0); 291 249 if (ceph_msgr_wq) ··· 519 471 return r; 520 472 } 521 473 474 + static int ceph_tcp_recvpage(struct socket *sock, struct page *page, 475 + int page_offset, size_t length) 476 + { 477 + void *kaddr; 478 + int ret; 479 + 480 + BUG_ON(page_offset + length > PAGE_SIZE); 481 + 482 + kaddr = kmap(page); 483 + BUG_ON(!kaddr); 484 + ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); 485 + kunmap(page); 486 + 487 + return ret; 488 + } 489 + 522 490 /* 523 491 * write something. @more is true if caller will be sending more data 524 492 * shortly. ··· 557 493 } 558 494 559 495 static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 560 - int offset, size_t size, int more) 496 + int offset, size_t size, bool more) 561 497 { 562 498 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); 563 499 int ret; ··· 761 697 } 762 698 763 699 #ifdef CONFIG_BLOCK 764 - static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) 700 + 701 + /* 702 + * For a bio data item, a piece is whatever remains of the next 703 + * entry in the current bio iovec, or the first entry in the next 704 + * bio in the list. 
705 + */ 706 + static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, 707 + size_t length) 765 708 { 766 - if (!bio) { 767 - *iter = NULL; 768 - *seg = 0; 769 - return; 770 - } 771 - *iter = bio; 772 - *seg = bio->bi_idx; 709 + struct ceph_msg_data *data = cursor->data; 710 + struct bio *bio; 711 + 712 + BUG_ON(data->type != CEPH_MSG_DATA_BIO); 713 + 714 + bio = data->bio; 715 + BUG_ON(!bio); 716 + BUG_ON(!bio->bi_vcnt); 717 + 718 + cursor->resid = min(length, data->bio_length); 719 + cursor->bio = bio; 720 + cursor->vector_index = 0; 721 + cursor->vector_offset = 0; 722 + cursor->last_piece = length <= bio->bi_io_vec[0].bv_len; 773 723 } 774 724 775 - static void iter_bio_next(struct bio **bio_iter, int *seg) 725 + static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor, 726 + size_t *page_offset, 727 + size_t *length) 776 728 { 777 - if (*bio_iter == NULL) 778 - return; 729 + struct ceph_msg_data *data = cursor->data; 730 + struct bio *bio; 731 + struct bio_vec *bio_vec; 732 + unsigned int index; 779 733 780 - BUG_ON(*seg >= (*bio_iter)->bi_vcnt); 734 + BUG_ON(data->type != CEPH_MSG_DATA_BIO); 781 735 782 - (*seg)++; 783 - if (*seg == (*bio_iter)->bi_vcnt) 784 - init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); 785 - } 786 - #endif 736 + bio = cursor->bio; 737 + BUG_ON(!bio); 787 738 788 - static void prepare_write_message_data(struct ceph_connection *con) 789 - { 790 - struct ceph_msg *msg = con->out_msg; 739 + index = cursor->vector_index; 740 + BUG_ON(index >= (unsigned int) bio->bi_vcnt); 791 741 792 - BUG_ON(!msg); 793 - BUG_ON(!msg->hdr.data_len); 794 - 795 - /* initialize page iterator */ 796 - con->out_msg_pos.page = 0; 797 - if (msg->pages) 798 - con->out_msg_pos.page_pos = msg->page_alignment; 742 + bio_vec = &bio->bi_io_vec[index]; 743 + BUG_ON(cursor->vector_offset >= bio_vec->bv_len); 744 + *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset); 745 + BUG_ON(*page_offset >= PAGE_SIZE); 
746 + if (cursor->last_piece) /* pagelist offset is always 0 */ 747 + *length = cursor->resid; 799 748 else 800 - con->out_msg_pos.page_pos = 0; 749 + *length = (size_t) (bio_vec->bv_len - cursor->vector_offset); 750 + BUG_ON(*length > cursor->resid); 751 + BUG_ON(*page_offset + *length > PAGE_SIZE); 752 + 753 + return bio_vec->bv_page; 754 + } 755 + 756 + static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, 757 + size_t bytes) 758 + { 759 + struct bio *bio; 760 + struct bio_vec *bio_vec; 761 + unsigned int index; 762 + 763 + BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO); 764 + 765 + bio = cursor->bio; 766 + BUG_ON(!bio); 767 + 768 + index = cursor->vector_index; 769 + BUG_ON(index >= (unsigned int) bio->bi_vcnt); 770 + bio_vec = &bio->bi_io_vec[index]; 771 + 772 + /* Advance the cursor offset */ 773 + 774 + BUG_ON(cursor->resid < bytes); 775 + cursor->resid -= bytes; 776 + cursor->vector_offset += bytes; 777 + if (cursor->vector_offset < bio_vec->bv_len) 778 + return false; /* more bytes to process in this segment */ 779 + BUG_ON(cursor->vector_offset != bio_vec->bv_len); 780 + 781 + /* Move on to the next segment, and possibly the next bio */ 782 + 783 + if (++index == (unsigned int) bio->bi_vcnt) { 784 + bio = bio->bi_next; 785 + index = 0; 786 + } 787 + cursor->bio = bio; 788 + cursor->vector_index = index; 789 + cursor->vector_offset = 0; 790 + 791 + if (!cursor->last_piece) { 792 + BUG_ON(!cursor->resid); 793 + BUG_ON(!bio); 794 + /* A short read is OK, so use <= rather than == */ 795 + if (cursor->resid <= bio->bi_io_vec[index].bv_len) 796 + cursor->last_piece = true; 797 + } 798 + 799 + return true; 800 + } 801 + #endif /* CONFIG_BLOCK */ 802 + 803 + /* 804 + * For a page array, a piece comes from the first page in the array 805 + * that has not already been fully consumed. 
806 + */ 807 + static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, 808 + size_t length) 809 + { 810 + struct ceph_msg_data *data = cursor->data; 811 + int page_count; 812 + 813 + BUG_ON(data->type != CEPH_MSG_DATA_PAGES); 814 + 815 + BUG_ON(!data->pages); 816 + BUG_ON(!data->length); 817 + 818 + cursor->resid = min(length, data->length); 819 + page_count = calc_pages_for(data->alignment, (u64)data->length); 820 + cursor->page_offset = data->alignment & ~PAGE_MASK; 821 + cursor->page_index = 0; 822 + BUG_ON(page_count > (int)USHRT_MAX); 823 + cursor->page_count = (unsigned short)page_count; 824 + BUG_ON(length > SIZE_MAX - cursor->page_offset); 825 + cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE; 826 + } 827 + 828 + static struct page * 829 + ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor, 830 + size_t *page_offset, size_t *length) 831 + { 832 + struct ceph_msg_data *data = cursor->data; 833 + 834 + BUG_ON(data->type != CEPH_MSG_DATA_PAGES); 835 + 836 + BUG_ON(cursor->page_index >= cursor->page_count); 837 + BUG_ON(cursor->page_offset >= PAGE_SIZE); 838 + 839 + *page_offset = cursor->page_offset; 840 + if (cursor->last_piece) 841 + *length = cursor->resid; 842 + else 843 + *length = PAGE_SIZE - *page_offset; 844 + 845 + return data->pages[cursor->page_index]; 846 + } 847 + 848 + static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor, 849 + size_t bytes) 850 + { 851 + BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES); 852 + 853 + BUG_ON(cursor->page_offset + bytes > PAGE_SIZE); 854 + 855 + /* Advance the cursor page offset */ 856 + 857 + cursor->resid -= bytes; 858 + cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK; 859 + if (!bytes || cursor->page_offset) 860 + return false; /* more bytes to process in the current page */ 861 + 862 + /* Move on to the next page; offset is already at 0 */ 863 + 864 + BUG_ON(cursor->page_index >= cursor->page_count); 865 + 
cursor->page_index++; 866 + cursor->last_piece = cursor->resid <= PAGE_SIZE; 867 + 868 + return true; 869 + } 870 + 871 + /* 872 + * For a pagelist, a piece is whatever remains to be consumed in the 873 + * first page in the list, or the front of the next page. 874 + */ 875 + static void 876 + ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, 877 + size_t length) 878 + { 879 + struct ceph_msg_data *data = cursor->data; 880 + struct ceph_pagelist *pagelist; 881 + struct page *page; 882 + 883 + BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 884 + 885 + pagelist = data->pagelist; 886 + BUG_ON(!pagelist); 887 + 888 + if (!length) 889 + return; /* pagelist can be assigned but empty */ 890 + 891 + BUG_ON(list_empty(&pagelist->head)); 892 + page = list_first_entry(&pagelist->head, struct page, lru); 893 + 894 + cursor->resid = min(length, pagelist->length); 895 + cursor->page = page; 896 + cursor->offset = 0; 897 + cursor->last_piece = cursor->resid <= PAGE_SIZE; 898 + } 899 + 900 + static struct page * 901 + ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor, 902 + size_t *page_offset, size_t *length) 903 + { 904 + struct ceph_msg_data *data = cursor->data; 905 + struct ceph_pagelist *pagelist; 906 + 907 + BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 908 + 909 + pagelist = data->pagelist; 910 + BUG_ON(!pagelist); 911 + 912 + BUG_ON(!cursor->page); 913 + BUG_ON(cursor->offset + cursor->resid != pagelist->length); 914 + 915 + /* offset of first page in pagelist is always 0 */ 916 + *page_offset = cursor->offset & ~PAGE_MASK; 917 + if (cursor->last_piece) 918 + *length = cursor->resid; 919 + else 920 + *length = PAGE_SIZE - *page_offset; 921 + 922 + return cursor->page; 923 + } 924 + 925 + static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, 926 + size_t bytes) 927 + { 928 + struct ceph_msg_data *data = cursor->data; 929 + struct ceph_pagelist *pagelist; 930 + 931 + BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 
932 + 933 + pagelist = data->pagelist; 934 + BUG_ON(!pagelist); 935 + 936 + BUG_ON(cursor->offset + cursor->resid != pagelist->length); 937 + BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE); 938 + 939 + /* Advance the cursor offset */ 940 + 941 + cursor->resid -= bytes; 942 + cursor->offset += bytes; 943 + /* offset of first page in pagelist is always 0 */ 944 + if (!bytes || cursor->offset & ~PAGE_MASK) 945 + return false; /* more bytes to process in the current page */ 946 + 947 + /* Move on to the next page */ 948 + 949 + BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); 950 + cursor->page = list_entry_next(cursor->page, lru); 951 + cursor->last_piece = cursor->resid <= PAGE_SIZE; 952 + 953 + return true; 954 + } 955 + 956 + /* 957 + * Message data is handled (sent or received) in pieces, where each 958 + * piece resides on a single page. The network layer might not 959 + * consume an entire piece at once. A data item's cursor keeps 960 + * track of which piece is next to process and how much remains to 961 + * be processed in that piece. It also tracks whether the current 962 + * piece is the last one in the data item. 
963 + */ 964 + static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor) 965 + { 966 + size_t length = cursor->total_resid; 967 + 968 + switch (cursor->data->type) { 969 + case CEPH_MSG_DATA_PAGELIST: 970 + ceph_msg_data_pagelist_cursor_init(cursor, length); 971 + break; 972 + case CEPH_MSG_DATA_PAGES: 973 + ceph_msg_data_pages_cursor_init(cursor, length); 974 + break; 801 975 #ifdef CONFIG_BLOCK 802 - if (msg->bio) 803 - init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); 804 - #endif 805 - con->out_msg_pos.data_pos = 0; 806 - con->out_msg_pos.did_page_crc = false; 807 - con->out_more = 1; /* data + footer will follow */ 976 + case CEPH_MSG_DATA_BIO: 977 + ceph_msg_data_bio_cursor_init(cursor, length); 978 + break; 979 + #endif /* CONFIG_BLOCK */ 980 + case CEPH_MSG_DATA_NONE: 981 + default: 982 + /* BUG(); */ 983 + break; 984 + } 985 + cursor->need_crc = true; 986 + } 987 + 988 + static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) 989 + { 990 + struct ceph_msg_data_cursor *cursor = &msg->cursor; 991 + struct ceph_msg_data *data; 992 + 993 + BUG_ON(!length); 994 + BUG_ON(length > msg->data_length); 995 + BUG_ON(list_empty(&msg->data)); 996 + 997 + cursor->data_head = &msg->data; 998 + cursor->total_resid = length; 999 + data = list_first_entry(&msg->data, struct ceph_msg_data, links); 1000 + cursor->data = data; 1001 + 1002 + __ceph_msg_data_cursor_init(cursor); 1003 + } 1004 + 1005 + /* 1006 + * Return the page containing the next piece to process for a given 1007 + * data item, and supply the page offset and length of that piece. 1008 + * Indicate whether this is the last piece in this data item. 
1009 + */ 1010 + static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, 1011 + size_t *page_offset, size_t *length, 1012 + bool *last_piece) 1013 + { 1014 + struct page *page; 1015 + 1016 + switch (cursor->data->type) { 1017 + case CEPH_MSG_DATA_PAGELIST: 1018 + page = ceph_msg_data_pagelist_next(cursor, page_offset, length); 1019 + break; 1020 + case CEPH_MSG_DATA_PAGES: 1021 + page = ceph_msg_data_pages_next(cursor, page_offset, length); 1022 + break; 1023 + #ifdef CONFIG_BLOCK 1024 + case CEPH_MSG_DATA_BIO: 1025 + page = ceph_msg_data_bio_next(cursor, page_offset, length); 1026 + break; 1027 + #endif /* CONFIG_BLOCK */ 1028 + case CEPH_MSG_DATA_NONE: 1029 + default: 1030 + page = NULL; 1031 + break; 1032 + } 1033 + BUG_ON(!page); 1034 + BUG_ON(*page_offset + *length > PAGE_SIZE); 1035 + BUG_ON(!*length); 1036 + if (last_piece) 1037 + *last_piece = cursor->last_piece; 1038 + 1039 + return page; 1040 + } 1041 + 1042 + /* 1043 + * Returns true if the result moves the cursor on to the next piece 1044 + * of the data item. 
1045 + */ 1046 + static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, 1047 + size_t bytes) 1048 + { 1049 + bool new_piece; 1050 + 1051 + BUG_ON(bytes > cursor->resid); 1052 + switch (cursor->data->type) { 1053 + case CEPH_MSG_DATA_PAGELIST: 1054 + new_piece = ceph_msg_data_pagelist_advance(cursor, bytes); 1055 + break; 1056 + case CEPH_MSG_DATA_PAGES: 1057 + new_piece = ceph_msg_data_pages_advance(cursor, bytes); 1058 + break; 1059 + #ifdef CONFIG_BLOCK 1060 + case CEPH_MSG_DATA_BIO: 1061 + new_piece = ceph_msg_data_bio_advance(cursor, bytes); 1062 + break; 1063 + #endif /* CONFIG_BLOCK */ 1064 + case CEPH_MSG_DATA_NONE: 1065 + default: 1066 + BUG(); 1067 + break; 1068 + } 1069 + cursor->total_resid -= bytes; 1070 + 1071 + if (!cursor->resid && cursor->total_resid) { 1072 + WARN_ON(!cursor->last_piece); 1073 + BUG_ON(list_is_last(&cursor->data->links, cursor->data_head)); 1074 + cursor->data = list_entry_next(cursor->data, links); 1075 + __ceph_msg_data_cursor_init(cursor); 1076 + new_piece = true; 1077 + } 1078 + cursor->need_crc = new_piece; 1079 + 1080 + return new_piece; 1081 + } 1082 + 1083 + static void prepare_message_data(struct ceph_msg *msg, u32 data_len) 1084 + { 1085 + BUG_ON(!msg); 1086 + BUG_ON(!data_len); 1087 + 1088 + /* Initialize data cursor */ 1089 + 1090 + ceph_msg_data_cursor_init(msg, (size_t)data_len); 808 1091 } 809 1092 810 1093 /* ··· 1214 803 m->hdr.seq = cpu_to_le64(++con->out_seq); 1215 804 m->needs_out_seq = false; 1216 805 } 1217 - #ifdef CONFIG_BLOCK 1218 - else 1219 - m->bio_iter = NULL; 1220 - #endif 806 + WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len)); 1221 807 1222 - dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", 808 + dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n", 1223 809 m, con->out_seq, le16_to_cpu(m->hdr.type), 1224 810 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), 1225 - le32_to_cpu(m->hdr.data_len), 1226 - m->nr_pages); 811 + 
m->data_length); 1227 812 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); 1228 813 1229 814 /* tag + hdr + front + middle */ ··· 1250 843 1251 844 /* is there a data payload? */ 1252 845 con->out_msg->footer.data_crc = 0; 1253 - if (m->hdr.data_len) 1254 - prepare_write_message_data(con); 1255 - else 846 + if (m->data_length) { 847 + prepare_message_data(con->out_msg, m->data_length); 848 + con->out_more = 1; /* data + footer will follow */ 849 + } else { 1256 850 /* no, queue up footer too and be done */ 1257 851 prepare_write_message_footer(con); 852 + } 1258 853 1259 854 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1260 855 } ··· 1279 870 &con->out_temp_ack); 1280 871 1281 872 con->out_more = 1; /* more will follow.. eventually.. */ 873 + con_flag_set(con, CON_FLAG_WRITE_PENDING); 874 + } 875 + 876 + /* 877 + * Prepare to share the seq during handshake 878 + */ 879 + static void prepare_write_seq(struct ceph_connection *con) 880 + { 881 + dout("prepare_write_seq %p %llu -> %llu\n", con, 882 + con->in_seq_acked, con->in_seq); 883 + con->in_seq_acked = con->in_seq; 884 + 885 + con_out_kvec_reset(con); 886 + 887 + con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 888 + con_out_kvec_add(con, sizeof (con->out_temp_ack), 889 + &con->out_temp_ack); 890 + 1282 891 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1283 892 } 1284 893 ··· 1449 1022 return ret; /* done! 
*/ 1450 1023 } 1451 1024 1452 - static void out_msg_pos_next(struct ceph_connection *con, struct page *page, 1453 - size_t len, size_t sent, bool in_trail) 1025 + static u32 ceph_crc32c_page(u32 crc, struct page *page, 1026 + unsigned int page_offset, 1027 + unsigned int length) 1454 1028 { 1455 - struct ceph_msg *msg = con->out_msg; 1029 + char *kaddr; 1456 1030 1457 - BUG_ON(!msg); 1458 - BUG_ON(!sent); 1031 + kaddr = kmap(page); 1032 + BUG_ON(kaddr == NULL); 1033 + crc = crc32c(crc, kaddr + page_offset, length); 1034 + kunmap(page); 1459 1035 1460 - con->out_msg_pos.data_pos += sent; 1461 - con->out_msg_pos.page_pos += sent; 1462 - if (sent < len) 1463 - return; 1464 - 1465 - BUG_ON(sent != len); 1466 - con->out_msg_pos.page_pos = 0; 1467 - con->out_msg_pos.page++; 1468 - con->out_msg_pos.did_page_crc = false; 1469 - if (in_trail) 1470 - list_move_tail(&page->lru, 1471 - &msg->trail->head); 1472 - else if (msg->pagelist) 1473 - list_move_tail(&page->lru, 1474 - &msg->pagelist->head); 1475 - #ifdef CONFIG_BLOCK 1476 - else if (msg->bio) 1477 - iter_bio_next(&msg->bio_iter, &msg->bio_seg); 1478 - #endif 1036 + return crc; 1479 1037 } 1480 - 1481 1038 /* 1482 1039 * Write as much message data payload as we can. If we finish, queue 1483 1040 * up the footer. ··· 1469 1058 * 0 -> socket full, but more to do 1470 1059 * <0 -> error 1471 1060 */ 1472 - static int write_partial_msg_pages(struct ceph_connection *con) 1061 + static int write_partial_message_data(struct ceph_connection *con) 1473 1062 { 1474 1063 struct ceph_msg *msg = con->out_msg; 1475 - unsigned int data_len = le32_to_cpu(msg->hdr.data_len); 1476 - size_t len; 1064 + struct ceph_msg_data_cursor *cursor = &msg->cursor; 1477 1065 bool do_datacrc = !con->msgr->nocrc; 1478 - int ret; 1479 - int total_max_write; 1480 - bool in_trail = false; 1481 - const size_t trail_len = (msg->trail ? 
msg->trail->length : 0); 1482 - const size_t trail_off = data_len - trail_len; 1066 + u32 crc; 1483 1067 1484 - dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", 1485 - con, msg, con->out_msg_pos.page, msg->nr_pages, 1486 - con->out_msg_pos.page_pos); 1068 + dout("%s %p msg %p\n", __func__, con, msg); 1069 + 1070 + if (list_empty(&msg->data)) 1071 + return -EINVAL; 1487 1072 1488 1073 /* 1489 1074 * Iterate through each page that contains data to be ··· 1489 1082 * need to map the page. If we have no pages, they have 1490 1083 * been revoked, so use the zero page. 1491 1084 */ 1492 - while (data_len > con->out_msg_pos.data_pos) { 1493 - struct page *page = NULL; 1494 - int max_write = PAGE_SIZE; 1495 - int bio_offset = 0; 1085 + crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0; 1086 + while (cursor->resid) { 1087 + struct page *page; 1088 + size_t page_offset; 1089 + size_t length; 1090 + bool last_piece; 1091 + bool need_crc; 1092 + int ret; 1496 1093 1497 - in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off; 1498 - if (!in_trail) 1499 - total_max_write = trail_off - con->out_msg_pos.data_pos; 1094 + page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 1095 + &last_piece); 1096 + ret = ceph_tcp_sendpage(con->sock, page, page_offset, 1097 + length, last_piece); 1098 + if (ret <= 0) { 1099 + if (do_datacrc) 1100 + msg->footer.data_crc = cpu_to_le32(crc); 1500 1101 1501 - if (in_trail) { 1502 - total_max_write = data_len - con->out_msg_pos.data_pos; 1503 - 1504 - page = list_first_entry(&msg->trail->head, 1505 - struct page, lru); 1506 - } else if (msg->pages) { 1507 - page = msg->pages[con->out_msg_pos.page]; 1508 - } else if (msg->pagelist) { 1509 - page = list_first_entry(&msg->pagelist->head, 1510 - struct page, lru); 1511 - #ifdef CONFIG_BLOCK 1512 - } else if (msg->bio) { 1513 - struct bio_vec *bv; 1514 - 1515 - bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); 1516 - page = bv->bv_page; 1517 - bio_offset = 
bv->bv_offset; 1518 - max_write = bv->bv_len; 1519 - #endif 1520 - } else { 1521 - page = zero_page; 1102 + return ret; 1522 1103 } 1523 - len = min_t(int, max_write - con->out_msg_pos.page_pos, 1524 - total_max_write); 1525 - 1526 - if (do_datacrc && !con->out_msg_pos.did_page_crc) { 1527 - void *base; 1528 - u32 crc = le32_to_cpu(msg->footer.data_crc); 1529 - char *kaddr; 1530 - 1531 - kaddr = kmap(page); 1532 - BUG_ON(kaddr == NULL); 1533 - base = kaddr + con->out_msg_pos.page_pos + bio_offset; 1534 - crc = crc32c(crc, base, len); 1535 - kunmap(page); 1536 - msg->footer.data_crc = cpu_to_le32(crc); 1537 - con->out_msg_pos.did_page_crc = true; 1538 - } 1539 - ret = ceph_tcp_sendpage(con->sock, page, 1540 - con->out_msg_pos.page_pos + bio_offset, 1541 - len, 1); 1542 - if (ret <= 0) 1543 - goto out; 1544 - 1545 - out_msg_pos_next(con, page, len, (size_t) ret, in_trail); 1104 + if (do_datacrc && cursor->need_crc) 1105 + crc = ceph_crc32c_page(crc, page, page_offset, length); 1106 + need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret); 1546 1107 } 1547 1108 1548 - dout("write_partial_msg_pages %p msg %p done\n", con, msg); 1109 + dout("%s %p msg %p done\n", __func__, con, msg); 1549 1110 1550 1111 /* prepare and queue up footer, too */ 1551 - if (!do_datacrc) 1112 + if (do_datacrc) 1113 + msg->footer.data_crc = cpu_to_le32(crc); 1114 + else 1552 1115 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; 1553 1116 con_out_kvec_reset(con); 1554 1117 prepare_write_message_footer(con); 1555 - ret = 1; 1556 - out: 1557 - return ret; 1118 + 1119 + return 1; /* must return > 0 to indicate success */ 1558 1120 } 1559 1121 1560 1122 /* ··· 1536 1160 while (con->out_skip > 0) { 1537 1161 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); 1538 1162 1539 - ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1); 1163 + ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); 1540 1164 if (ret <= 0) 1541 1165 goto out; 1542 1166 con->out_skip -= ret; ··· 1565 1189 { 
1566 1190 dout("prepare_read_ack %p\n", con); 1567 1191 con->in_base_pos = 0; 1192 + } 1193 + 1194 + static void prepare_read_seq(struct ceph_connection *con) 1195 + { 1196 + dout("prepare_read_seq %p\n", con); 1197 + con->in_base_pos = 0; 1198 + con->in_tag = CEPH_MSGR_TAG_SEQ; 1568 1199 } 1569 1200 1570 1201 static void prepare_read_tag(struct ceph_connection *con) ··· 1980 1597 con->error_msg = "connect authorization failure"; 1981 1598 return -1; 1982 1599 } 1983 - con->auth_retry = 1; 1984 1600 con_out_kvec_reset(con); 1985 1601 ret = prepare_write_connect(con); 1986 1602 if (ret < 0) ··· 2050 1668 prepare_read_connect(con); 2051 1669 break; 2052 1670 1671 + case CEPH_MSGR_TAG_SEQ: 2053 1672 case CEPH_MSGR_TAG_READY: 2054 1673 if (req_feat & ~server_feat) { 2055 1674 pr_err("%s%lld %s protocol feature mismatch," ··· 2065 1682 2066 1683 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2067 1684 con->state = CON_STATE_OPEN; 2068 - 1685 + con->auth_retry = 0; /* we authenticated; clear flag */ 2069 1686 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2070 1687 con->connect_seq++; 2071 1688 con->peer_features = server_feat; ··· 2081 1698 2082 1699 con->delay = 0; /* reset backoff memory */ 2083 1700 2084 - prepare_read_tag(con); 1701 + if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 1702 + prepare_write_seq(con); 1703 + prepare_read_seq(con); 1704 + } else { 1705 + prepare_read_tag(con); 1706 + } 2085 1707 break; 2086 1708 2087 1709 case CEPH_MSGR_TAG_WAIT: ··· 2120 1732 return read_partial(con, end, size, &con->in_temp_ack); 2121 1733 } 2122 1734 2123 - 2124 1735 /* 2125 1736 * We can finally discard anything that's been acked. 
2126 1737 */ ··· 2142 1755 } 2143 1756 prepare_read_tag(con); 2144 1757 } 2145 - 2146 - 2147 1758 2148 1759 2149 1760 static int read_partial_message_section(struct ceph_connection *con, ··· 2167 1782 return 1; 2168 1783 } 2169 1784 2170 - static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2171 - 2172 - static int read_partial_message_pages(struct ceph_connection *con, 2173 - struct page **pages, 2174 - unsigned int data_len, bool do_datacrc) 1785 + static int read_partial_msg_data(struct ceph_connection *con) 2175 1786 { 2176 - void *p; 1787 + struct ceph_msg *msg = con->in_msg; 1788 + struct ceph_msg_data_cursor *cursor = &msg->cursor; 1789 + const bool do_datacrc = !con->msgr->nocrc; 1790 + struct page *page; 1791 + size_t page_offset; 1792 + size_t length; 1793 + u32 crc = 0; 2177 1794 int ret; 2178 - int left; 2179 1795 2180 - left = min((int)(data_len - con->in_msg_pos.data_pos), 2181 - (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); 2182 - /* (page) data */ 2183 - BUG_ON(pages == NULL); 2184 - p = kmap(pages[con->in_msg_pos.page]); 2185 - ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, 2186 - left); 2187 - if (ret > 0 && do_datacrc) 2188 - con->in_data_crc = 2189 - crc32c(con->in_data_crc, 2190 - p + con->in_msg_pos.page_pos, ret); 2191 - kunmap(pages[con->in_msg_pos.page]); 2192 - if (ret <= 0) 2193 - return ret; 2194 - con->in_msg_pos.data_pos += ret; 2195 - con->in_msg_pos.page_pos += ret; 2196 - if (con->in_msg_pos.page_pos == PAGE_SIZE) { 2197 - con->in_msg_pos.page_pos = 0; 2198 - con->in_msg_pos.page++; 1796 + BUG_ON(!msg); 1797 + if (list_empty(&msg->data)) 1798 + return -EIO; 1799 + 1800 + if (do_datacrc) 1801 + crc = con->in_data_crc; 1802 + while (cursor->resid) { 1803 + page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 1804 + NULL); 1805 + ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 1806 + if (ret <= 0) { 1807 + if (do_datacrc) 1808 + con->in_data_crc = crc; 1809 + 1810 + return 
ret; 1811 + } 1812 + 1813 + if (do_datacrc) 1814 + crc = ceph_crc32c_page(crc, page, page_offset, ret); 1815 + (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); 2199 1816 } 1817 + if (do_datacrc) 1818 + con->in_data_crc = crc; 2200 1819 2201 - return ret; 1820 + return 1; /* must return > 0 to indicate success */ 2202 1821 } 2203 - 2204 - #ifdef CONFIG_BLOCK 2205 - static int read_partial_message_bio(struct ceph_connection *con, 2206 - struct bio **bio_iter, int *bio_seg, 2207 - unsigned int data_len, bool do_datacrc) 2208 - { 2209 - struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); 2210 - void *p; 2211 - int ret, left; 2212 - 2213 - left = min((int)(data_len - con->in_msg_pos.data_pos), 2214 - (int)(bv->bv_len - con->in_msg_pos.page_pos)); 2215 - 2216 - p = kmap(bv->bv_page) + bv->bv_offset; 2217 - 2218 - ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, 2219 - left); 2220 - if (ret > 0 && do_datacrc) 2221 - con->in_data_crc = 2222 - crc32c(con->in_data_crc, 2223 - p + con->in_msg_pos.page_pos, ret); 2224 - kunmap(bv->bv_page); 2225 - if (ret <= 0) 2226 - return ret; 2227 - con->in_msg_pos.data_pos += ret; 2228 - con->in_msg_pos.page_pos += ret; 2229 - if (con->in_msg_pos.page_pos == bv->bv_len) { 2230 - con->in_msg_pos.page_pos = 0; 2231 - iter_bio_next(bio_iter, bio_seg); 2232 - } 2233 - 2234 - return ret; 2235 - } 2236 - #endif 2237 1822 2238 1823 /* 2239 1824 * read (part of) a message. 
2240 1825 */ 1826 + static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 1827 + 2241 1828 static int read_partial_message(struct ceph_connection *con) 2242 1829 { 2243 1830 struct ceph_msg *m = con->in_msg; ··· 2242 1885 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2243 1886 return -EIO; 2244 1887 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2245 - if (middle_len > CEPH_MSG_MAX_DATA_LEN) 1888 + if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2246 1889 return -EIO; 2247 1890 data_len = le32_to_cpu(con->in_hdr.data_len); 2248 1891 if (data_len > CEPH_MSG_MAX_DATA_LEN) ··· 2271 1914 int skip = 0; 2272 1915 2273 1916 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2274 - con->in_hdr.front_len, con->in_hdr.data_len); 1917 + front_len, data_len); 2275 1918 ret = ceph_con_in_msg_alloc(con, &skip); 2276 1919 if (ret < 0) 2277 1920 return ret; 1921 + 1922 + BUG_ON(!con->in_msg ^ skip); 1923 + if (con->in_msg && data_len > con->in_msg->data_length) { 1924 + pr_warning("%s skipping long message (%u > %zd)\n", 1925 + __func__, data_len, con->in_msg->data_length); 1926 + ceph_msg_put(con->in_msg); 1927 + con->in_msg = NULL; 1928 + skip = 1; 1929 + } 2278 1930 if (skip) { 2279 1931 /* skip this message */ 2280 1932 dout("alloc_msg said skip message\n"); 2281 - BUG_ON(con->in_msg); 2282 1933 con->in_base_pos = -front_len - middle_len - data_len - 2283 1934 sizeof(m->footer); 2284 1935 con->in_tag = CEPH_MSGR_TAG_READY; ··· 2301 1936 if (m->middle) 2302 1937 m->middle->vec.iov_len = 0; 2303 1938 2304 - con->in_msg_pos.page = 0; 2305 - if (m->pages) 2306 - con->in_msg_pos.page_pos = m->page_alignment; 2307 - else 2308 - con->in_msg_pos.page_pos = 0; 2309 - con->in_msg_pos.data_pos = 0; 1939 + /* prepare for data payload, if any */ 2310 1940 2311 - #ifdef CONFIG_BLOCK 2312 - if (m->bio) 2313 - init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); 2314 - #endif 1941 + if (data_len) 1942 + prepare_message_data(con->in_msg, data_len); 2315 1943 } 2316 1944 2317 
1945 /* front */ ··· 2323 1965 } 2324 1966 2325 1967 /* (page) data */ 2326 - while (con->in_msg_pos.data_pos < data_len) { 2327 - if (m->pages) { 2328 - ret = read_partial_message_pages(con, m->pages, 2329 - data_len, do_datacrc); 2330 - if (ret <= 0) 2331 - return ret; 2332 - #ifdef CONFIG_BLOCK 2333 - } else if (m->bio) { 2334 - BUG_ON(!m->bio_iter); 2335 - ret = read_partial_message_bio(con, 2336 - &m->bio_iter, &m->bio_seg, 2337 - data_len, do_datacrc); 2338 - if (ret <= 0) 2339 - return ret; 2340 - #endif 2341 - } else { 2342 - BUG_ON(1); 2343 - } 1968 + if (data_len) { 1969 + ret = read_partial_msg_data(con); 1970 + if (ret <= 0) 1971 + return ret; 2344 1972 } 2345 1973 2346 1974 /* footer */ ··· 2452 2108 goto do_next; 2453 2109 } 2454 2110 2455 - ret = write_partial_msg_pages(con); 2111 + ret = write_partial_message_data(con); 2456 2112 if (ret == 1) 2457 2113 goto more_kvec; /* we need to send the footer, too! */ 2458 2114 if (ret == 0) 2459 2115 goto out; 2460 2116 if (ret < 0) { 2461 - dout("try_write write_partial_msg_pages err %d\n", 2117 + dout("try_write write_partial_message_data err %d\n", 2462 2118 ret); 2463 2119 goto out; 2464 2120 } ··· 2610 2266 prepare_read_tag(con); 2611 2267 goto more; 2612 2268 } 2613 - if (con->in_tag == CEPH_MSGR_TAG_ACK) { 2269 + if (con->in_tag == CEPH_MSGR_TAG_ACK || 2270 + con->in_tag == CEPH_MSGR_TAG_SEQ) { 2271 + /* 2272 + * the final handshake seq exchange is semantically 2273 + * equivalent to an ACK 2274 + */ 2614 2275 ret = read_partial_ack(con); 2615 2276 if (ret <= 0) 2616 2277 goto out; ··· 3021 2672 } 3022 2673 EXPORT_SYMBOL(ceph_con_keepalive); 3023 2674 2675 + static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 2676 + { 2677 + struct ceph_msg_data *data; 2678 + 2679 + if (WARN_ON(!ceph_msg_data_type_valid(type))) 2680 + return NULL; 2681 + 2682 + data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); 2683 + if (data) 2684 + data->type = type; 2685 + 
INIT_LIST_HEAD(&data->links); 2686 + 2687 + return data; 2688 + } 2689 + 2690 + static void ceph_msg_data_destroy(struct ceph_msg_data *data) 2691 + { 2692 + if (!data) 2693 + return; 2694 + 2695 + WARN_ON(!list_empty(&data->links)); 2696 + if (data->type == CEPH_MSG_DATA_PAGELIST) { 2697 + ceph_pagelist_release(data->pagelist); 2698 + kfree(data->pagelist); 2699 + } 2700 + kmem_cache_free(ceph_msg_data_cache, data); 2701 + } 2702 + 2703 + void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 2704 + size_t length, size_t alignment) 2705 + { 2706 + struct ceph_msg_data *data; 2707 + 2708 + BUG_ON(!pages); 2709 + BUG_ON(!length); 2710 + 2711 + data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); 2712 + BUG_ON(!data); 2713 + data->pages = pages; 2714 + data->length = length; 2715 + data->alignment = alignment & ~PAGE_MASK; 2716 + 2717 + list_add_tail(&data->links, &msg->data); 2718 + msg->data_length += length; 2719 + } 2720 + EXPORT_SYMBOL(ceph_msg_data_add_pages); 2721 + 2722 + void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 2723 + struct ceph_pagelist *pagelist) 2724 + { 2725 + struct ceph_msg_data *data; 2726 + 2727 + BUG_ON(!pagelist); 2728 + BUG_ON(!pagelist->length); 2729 + 2730 + data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); 2731 + BUG_ON(!data); 2732 + data->pagelist = pagelist; 2733 + 2734 + list_add_tail(&data->links, &msg->data); 2735 + msg->data_length += pagelist->length; 2736 + } 2737 + EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 2738 + 2739 + #ifdef CONFIG_BLOCK 2740 + void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, 2741 + size_t length) 2742 + { 2743 + struct ceph_msg_data *data; 2744 + 2745 + BUG_ON(!bio); 2746 + 2747 + data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); 2748 + BUG_ON(!data); 2749 + data->bio = bio; 2750 + data->bio_length = length; 2751 + 2752 + list_add_tail(&data->links, &msg->data); 2753 + msg->data_length += length; 2754 + } 2755 + EXPORT_SYMBOL(ceph_msg_data_add_bio); 2756 + #endif /* 
CONFIG_BLOCK */ 3024 2757 3025 2758 /* 3026 2759 * construct a new message with given type, size ··· 3113 2682 { 3114 2683 struct ceph_msg *m; 3115 2684 3116 - m = kmalloc(sizeof(*m), flags); 2685 + m = kmem_cache_zalloc(ceph_msg_cache, flags); 3117 2686 if (m == NULL) 3118 2687 goto out; 3119 - kref_init(&m->kref); 3120 2688 3121 - m->con = NULL; 3122 - INIT_LIST_HEAD(&m->list_head); 3123 - 3124 - m->hdr.tid = 0; 3125 2689 m->hdr.type = cpu_to_le16(type); 3126 2690 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3127 - m->hdr.version = 0; 3128 2691 m->hdr.front_len = cpu_to_le32(front_len); 3129 - m->hdr.middle_len = 0; 3130 - m->hdr.data_len = 0; 3131 - m->hdr.data_off = 0; 3132 - m->hdr.reserved = 0; 3133 - m->footer.front_crc = 0; 3134 - m->footer.middle_crc = 0; 3135 - m->footer.data_crc = 0; 3136 - m->footer.flags = 0; 3137 - m->front_max = front_len; 3138 - m->front_is_vmalloc = false; 3139 - m->more_to_follow = false; 3140 - m->ack_stamp = 0; 3141 - m->pool = NULL; 3142 2692 3143 - /* middle */ 3144 - m->middle = NULL; 3145 - 3146 - /* data */ 3147 - m->nr_pages = 0; 3148 - m->page_alignment = 0; 3149 - m->pages = NULL; 3150 - m->pagelist = NULL; 3151 - #ifdef CONFIG_BLOCK 3152 - m->bio = NULL; 3153 - m->bio_iter = NULL; 3154 - m->bio_seg = 0; 3155 - #endif /* CONFIG_BLOCK */ 3156 - m->trail = NULL; 2693 + INIT_LIST_HEAD(&m->list_head); 2694 + kref_init(&m->kref); 2695 + INIT_LIST_HEAD(&m->data); 3157 2696 3158 2697 /* front */ 2698 + m->front_max = front_len; 3159 2699 if (front_len) { 3160 2700 if (front_len > PAGE_CACHE_SIZE) { 3161 2701 m->front.iov_base = __vmalloc(front_len, flags, ··· 3204 2802 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3205 2803 { 3206 2804 struct ceph_msg_header *hdr = &con->in_hdr; 3207 - int type = le16_to_cpu(hdr->type); 3208 - int front_len = le32_to_cpu(hdr->front_len); 3209 2805 int middle_len = le32_to_cpu(hdr->middle_len); 2806 + struct ceph_msg *msg; 3210 2807 int ret = 0; 3211 2808 3212 
2809 BUG_ON(con->in_msg != NULL); 2810 + BUG_ON(!con->ops->alloc_msg); 3213 2811 3214 - if (con->ops->alloc_msg) { 3215 - struct ceph_msg *msg; 3216 - 3217 - mutex_unlock(&con->mutex); 3218 - msg = con->ops->alloc_msg(con, hdr, skip); 3219 - mutex_lock(&con->mutex); 3220 - if (con->state != CON_STATE_OPEN) { 3221 - if (msg) 3222 - ceph_msg_put(msg); 3223 - return -EAGAIN; 3224 - } 3225 - con->in_msg = msg; 3226 - if (con->in_msg) { 3227 - con->in_msg->con = con->ops->get(con); 3228 - BUG_ON(con->in_msg->con == NULL); 3229 - } 3230 - if (*skip) { 3231 - con->in_msg = NULL; 3232 - return 0; 3233 - } 3234 - if (!con->in_msg) { 3235 - con->error_msg = 3236 - "error allocating memory for incoming message"; 3237 - return -ENOMEM; 3238 - } 2812 + mutex_unlock(&con->mutex); 2813 + msg = con->ops->alloc_msg(con, hdr, skip); 2814 + mutex_lock(&con->mutex); 2815 + if (con->state != CON_STATE_OPEN) { 2816 + if (msg) 2817 + ceph_msg_put(msg); 2818 + return -EAGAIN; 3239 2819 } 3240 - if (!con->in_msg) { 3241 - con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false); 3242 - if (!con->in_msg) { 3243 - pr_err("unable to allocate msg type %d len %d\n", 3244 - type, front_len); 3245 - return -ENOMEM; 3246 - } 2820 + if (msg) { 2821 + BUG_ON(*skip); 2822 + con->in_msg = msg; 3247 2823 con->in_msg->con = con->ops->get(con); 3248 2824 BUG_ON(con->in_msg->con == NULL); 3249 - con->in_msg->page_alignment = le16_to_cpu(hdr->data_off); 2825 + } else { 2826 + /* 2827 + * Null message pointer means either we should skip 2828 + * this message or we couldn't allocate memory. The 2829 + * former is not an error. 
2830 + */ 2831 + if (*skip) 2832 + return 0; 2833 + con->error_msg = "error allocating memory for incoming message"; 2834 + 2835 + return -ENOMEM; 3250 2836 } 3251 2837 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3252 2838 ··· 3260 2870 vfree(m->front.iov_base); 3261 2871 else 3262 2872 kfree(m->front.iov_base); 3263 - kfree(m); 2873 + kmem_cache_free(ceph_msg_cache, m); 3264 2874 } 3265 2875 3266 2876 /* ··· 3269 2879 void ceph_msg_last_put(struct kref *kref) 3270 2880 { 3271 2881 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 2882 + LIST_HEAD(data); 2883 + struct list_head *links; 2884 + struct list_head *next; 3272 2885 3273 2886 dout("ceph_msg_put last one on %p\n", m); 3274 2887 WARN_ON(!list_empty(&m->list_head)); ··· 3281 2888 ceph_buffer_put(m->middle); 3282 2889 m->middle = NULL; 3283 2890 } 3284 - m->nr_pages = 0; 3285 - m->pages = NULL; 3286 2891 3287 - if (m->pagelist) { 3288 - ceph_pagelist_release(m->pagelist); 3289 - kfree(m->pagelist); 3290 - m->pagelist = NULL; 2892 + list_splice_init(&m->data, &data); 2893 + list_for_each_safe(links, next, &data) { 2894 + struct ceph_msg_data *data; 2895 + 2896 + data = list_entry(links, struct ceph_msg_data, links); 2897 + list_del_init(links); 2898 + ceph_msg_data_destroy(data); 3291 2899 } 3292 - 3293 - m->trail = NULL; 2900 + m->data_length = 0; 3294 2901 3295 2902 if (m->pool) 3296 2903 ceph_msgpool_put(m->pool, m); ··· 3301 2908 3302 2909 void ceph_msg_dump(struct ceph_msg *msg) 3303 2910 { 3304 - pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg, 3305 - msg->front_max, msg->nr_pages); 2911 + pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, 2912 + msg->front_max, msg->data_length); 3306 2913 print_hex_dump(KERN_DEBUG, "header: ", 3307 2914 DUMP_PREFIX_OFFSET, 16, 1, 3308 2915 &msg->hdr, sizeof(msg->hdr), true);
+3 -4
net/ceph/mon_client.c
··· 737 737 738 738 __validate_auth(monc); 739 739 740 - if (monc->auth->ops->is_authenticated(monc->auth)) 740 + if (ceph_auth_is_authenticated(monc->auth)) 741 741 __send_subscribe(monc); 742 742 } 743 743 __schedule_delayed(monc); ··· 892 892 893 893 mutex_lock(&monc->mutex); 894 894 had_debugfs_info = have_debugfs_info(monc); 895 - if (monc->auth->ops) 896 - was_auth = monc->auth->ops->is_authenticated(monc->auth); 895 + was_auth = ceph_auth_is_authenticated(monc->auth); 897 896 monc->pending_auth = 0; 898 897 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, 899 898 msg->front.iov_len, ··· 903 904 wake_up_all(&monc->client->auth_wq); 904 905 } else if (ret > 0) { 905 906 __send_prepared_auth_request(monc, ret); 906 - } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { 907 + } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) { 907 908 dout("authenticated, starting session\n"); 908 909 909 910 monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
+744 -363
net/ceph/osd_client.c
··· 1 + 1 2 #include <linux/ceph/ceph_debug.h> 2 3 3 4 #include <linux/module.h> ··· 22 21 #define OSD_OP_FRONT_LEN 4096 23 22 #define OSD_OPREPLY_FRONT_LEN 512 24 23 24 + static struct kmem_cache *ceph_osd_request_cache; 25 + 25 26 static const struct ceph_connection_operations osd_con_ops; 26 27 27 28 static void __send_queued(struct ceph_osd_client *osdc); ··· 34 31 struct ceph_osd_request *req); 35 32 static void __send_request(struct ceph_osd_client *osdc, 36 33 struct ceph_osd_request *req); 37 - 38 - static int op_has_extent(int op) 39 - { 40 - return (op == CEPH_OSD_OP_READ || 41 - op == CEPH_OSD_OP_WRITE); 42 - } 43 34 44 35 /* 45 36 * Implement client access to distributed object storage cluster. ··· 60 63 * 61 64 * fill osd op in request message. 62 65 */ 63 - static int calc_layout(struct ceph_vino vino, 64 - struct ceph_file_layout *layout, 65 - u64 off, u64 *plen, 66 - struct ceph_osd_request *req, 67 - struct ceph_osd_req_op *op) 66 + static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, 67 + u64 *objnum, u64 *objoff, u64 *objlen) 68 68 { 69 69 u64 orig_len = *plen; 70 - u64 bno = 0; 71 - u64 objoff = 0; 72 - u64 objlen = 0; 73 70 int r; 74 71 75 72 /* object extent? 
*/ 76 - r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno, 77 - &objoff, &objlen); 73 + r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum, 74 + objoff, objlen); 78 75 if (r < 0) 79 76 return r; 80 - if (objlen < orig_len) { 81 - *plen = objlen; 77 + if (*objlen < orig_len) { 78 + *plen = *objlen; 82 79 dout(" skipping last %llu, final file extent %llu~%llu\n", 83 80 orig_len - *plen, off, *plen); 84 81 } 85 82 86 - if (op_has_extent(op->op)) { 87 - u32 osize = le32_to_cpu(layout->fl_object_size); 88 - op->extent.offset = objoff; 89 - op->extent.length = objlen; 90 - if (op->extent.truncate_size <= off - objoff) { 91 - op->extent.truncate_size = 0; 92 - } else { 93 - op->extent.truncate_size -= off - objoff; 94 - if (op->extent.truncate_size > osize) 95 - op->extent.truncate_size = osize; 96 - } 83 + dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); 84 + 85 + return 0; 86 + } 87 + 88 + static void ceph_osd_data_init(struct ceph_osd_data *osd_data) 89 + { 90 + memset(osd_data, 0, sizeof (*osd_data)); 91 + osd_data->type = CEPH_OSD_DATA_TYPE_NONE; 92 + } 93 + 94 + static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, 95 + struct page **pages, u64 length, u32 alignment, 96 + bool pages_from_pool, bool own_pages) 97 + { 98 + osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; 99 + osd_data->pages = pages; 100 + osd_data->length = length; 101 + osd_data->alignment = alignment; 102 + osd_data->pages_from_pool = pages_from_pool; 103 + osd_data->own_pages = own_pages; 104 + } 105 + 106 + static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, 107 + struct ceph_pagelist *pagelist) 108 + { 109 + osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; 110 + osd_data->pagelist = pagelist; 111 + } 112 + 113 + #ifdef CONFIG_BLOCK 114 + static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, 115 + struct bio *bio, size_t bio_length) 116 + { 117 + osd_data->type = CEPH_OSD_DATA_TYPE_BIO; 118 + osd_data->bio = 
bio; 119 + osd_data->bio_length = bio_length; 120 + } 121 + #endif /* CONFIG_BLOCK */ 122 + 123 + #define osd_req_op_data(oreq, whch, typ, fld) \ 124 + ({ \ 125 + BUG_ON(whch >= (oreq)->r_num_ops); \ 126 + &(oreq)->r_ops[whch].typ.fld; \ 127 + }) 128 + 129 + static struct ceph_osd_data * 130 + osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) 131 + { 132 + BUG_ON(which >= osd_req->r_num_ops); 133 + 134 + return &osd_req->r_ops[which].raw_data_in; 135 + } 136 + 137 + struct ceph_osd_data * 138 + osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, 139 + unsigned int which) 140 + { 141 + return osd_req_op_data(osd_req, which, extent, osd_data); 142 + } 143 + EXPORT_SYMBOL(osd_req_op_extent_osd_data); 144 + 145 + struct ceph_osd_data * 146 + osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, 147 + unsigned int which) 148 + { 149 + return osd_req_op_data(osd_req, which, cls, response_data); 150 + } 151 + EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? 
*/ 152 + 153 + void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, 154 + unsigned int which, struct page **pages, 155 + u64 length, u32 alignment, 156 + bool pages_from_pool, bool own_pages) 157 + { 158 + struct ceph_osd_data *osd_data; 159 + 160 + osd_data = osd_req_op_raw_data_in(osd_req, which); 161 + ceph_osd_data_pages_init(osd_data, pages, length, alignment, 162 + pages_from_pool, own_pages); 163 + } 164 + EXPORT_SYMBOL(osd_req_op_raw_data_in_pages); 165 + 166 + void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, 167 + unsigned int which, struct page **pages, 168 + u64 length, u32 alignment, 169 + bool pages_from_pool, bool own_pages) 170 + { 171 + struct ceph_osd_data *osd_data; 172 + 173 + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 174 + ceph_osd_data_pages_init(osd_data, pages, length, alignment, 175 + pages_from_pool, own_pages); 176 + } 177 + EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); 178 + 179 + void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, 180 + unsigned int which, struct ceph_pagelist *pagelist) 181 + { 182 + struct ceph_osd_data *osd_data; 183 + 184 + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 185 + ceph_osd_data_pagelist_init(osd_data, pagelist); 186 + } 187 + EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); 188 + 189 + #ifdef CONFIG_BLOCK 190 + void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, 191 + unsigned int which, struct bio *bio, size_t bio_length) 192 + { 193 + struct ceph_osd_data *osd_data; 194 + 195 + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 196 + ceph_osd_data_bio_init(osd_data, bio, bio_length); 197 + } 198 + EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); 199 + #endif /* CONFIG_BLOCK */ 200 + 201 + static void osd_req_op_cls_request_info_pagelist( 202 + struct ceph_osd_request *osd_req, 203 + unsigned int which, struct ceph_pagelist *pagelist) 204 + { 205 + struct ceph_osd_data 
*osd_data; 206 + 207 + osd_data = osd_req_op_data(osd_req, which, cls, request_info); 208 + ceph_osd_data_pagelist_init(osd_data, pagelist); 209 + } 210 + 211 + void osd_req_op_cls_request_data_pagelist( 212 + struct ceph_osd_request *osd_req, 213 + unsigned int which, struct ceph_pagelist *pagelist) 214 + { 215 + struct ceph_osd_data *osd_data; 216 + 217 + osd_data = osd_req_op_data(osd_req, which, cls, request_data); 218 + ceph_osd_data_pagelist_init(osd_data, pagelist); 219 + } 220 + EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); 221 + 222 + void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, 223 + unsigned int which, struct page **pages, u64 length, 224 + u32 alignment, bool pages_from_pool, bool own_pages) 225 + { 226 + struct ceph_osd_data *osd_data; 227 + 228 + osd_data = osd_req_op_data(osd_req, which, cls, request_data); 229 + ceph_osd_data_pages_init(osd_data, pages, length, alignment, 230 + pages_from_pool, own_pages); 231 + } 232 + EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); 233 + 234 + void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, 235 + unsigned int which, struct page **pages, u64 length, 236 + u32 alignment, bool pages_from_pool, bool own_pages) 237 + { 238 + struct ceph_osd_data *osd_data; 239 + 240 + osd_data = osd_req_op_data(osd_req, which, cls, response_data); 241 + ceph_osd_data_pages_init(osd_data, pages, length, alignment, 242 + pages_from_pool, own_pages); 243 + } 244 + EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); 245 + 246 + static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) 247 + { 248 + switch (osd_data->type) { 249 + case CEPH_OSD_DATA_TYPE_NONE: 250 + return 0; 251 + case CEPH_OSD_DATA_TYPE_PAGES: 252 + return osd_data->length; 253 + case CEPH_OSD_DATA_TYPE_PAGELIST: 254 + return (u64)osd_data->pagelist->length; 255 + #ifdef CONFIG_BLOCK 256 + case CEPH_OSD_DATA_TYPE_BIO: 257 + return (u64)osd_data->bio_length; 258 + #endif /* CONFIG_BLOCK */ 259 + default: 
260 + WARN(true, "unrecognized data type %d\n", (int)osd_data->type); 261 + return 0; 97 262 } 98 - req->r_num_pages = calc_pages_for(off, *plen); 99 - req->r_page_alignment = off & ~PAGE_MASK; 100 - if (op->op == CEPH_OSD_OP_WRITE) 101 - op->payload_len = *plen; 263 + } 102 264 103 - dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", 104 - bno, objoff, objlen, req->r_num_pages); 265 + static void ceph_osd_data_release(struct ceph_osd_data *osd_data) 266 + { 267 + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { 268 + int num_pages; 105 269 106 - snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); 107 - req->r_oid_len = strlen(req->r_oid); 270 + num_pages = calc_pages_for((u64)osd_data->alignment, 271 + (u64)osd_data->length); 272 + ceph_release_page_vector(osd_data->pages, num_pages); 273 + } 274 + ceph_osd_data_init(osd_data); 275 + } 108 276 109 - return r; 277 + static void osd_req_op_data_release(struct ceph_osd_request *osd_req, 278 + unsigned int which) 279 + { 280 + struct ceph_osd_req_op *op; 281 + 282 + BUG_ON(which >= osd_req->r_num_ops); 283 + op = &osd_req->r_ops[which]; 284 + 285 + switch (op->op) { 286 + case CEPH_OSD_OP_READ: 287 + case CEPH_OSD_OP_WRITE: 288 + ceph_osd_data_release(&op->extent.osd_data); 289 + break; 290 + case CEPH_OSD_OP_CALL: 291 + ceph_osd_data_release(&op->cls.request_info); 292 + ceph_osd_data_release(&op->cls.request_data); 293 + ceph_osd_data_release(&op->cls.response_data); 294 + break; 295 + default: 296 + break; 297 + } 110 298 } 111 299 112 300 /* ··· 299 117 */ 300 118 void ceph_osdc_release_request(struct kref *kref) 301 119 { 302 - struct ceph_osd_request *req = container_of(kref, 303 - struct ceph_osd_request, 304 - r_kref); 120 + struct ceph_osd_request *req; 121 + unsigned int which; 305 122 123 + req = container_of(kref, struct ceph_osd_request, r_kref); 306 124 if (req->r_request) 307 125 ceph_msg_put(req->r_request); 308 - if (req->r_con_filling_msg) { 309 - dout("%s 
revoking msg %p from con %p\n", __func__, 310 - req->r_reply, req->r_con_filling_msg); 126 + if (req->r_reply) { 311 127 ceph_msg_revoke_incoming(req->r_reply); 312 - req->r_con_filling_msg->ops->put(req->r_con_filling_msg); 313 - req->r_con_filling_msg = NULL; 314 - } 315 - if (req->r_reply) 316 128 ceph_msg_put(req->r_reply); 317 - if (req->r_own_pages) 318 - ceph_release_page_vector(req->r_pages, 319 - req->r_num_pages); 129 + } 130 + 131 + for (which = 0; which < req->r_num_ops; which++) 132 + osd_req_op_data_release(req, which); 133 + 320 134 ceph_put_snap_context(req->r_snapc); 321 - ceph_pagelist_release(&req->r_trail); 322 135 if (req->r_mempool) 323 136 mempool_free(req, req->r_osdc->req_mempool); 324 137 else 325 - kfree(req); 138 + kmem_cache_free(ceph_osd_request_cache, req); 139 + 326 140 } 327 141 EXPORT_SYMBOL(ceph_osdc_release_request); 328 142 ··· 331 153 struct ceph_osd_request *req; 332 154 struct ceph_msg *msg; 333 155 size_t msg_size; 156 + 157 + BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX); 158 + BUG_ON(num_ops > CEPH_OSD_MAX_OP); 334 159 335 160 msg_size = 4 + 4 + 8 + 8 + 4+8; 336 161 msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */ ··· 349 168 req = mempool_alloc(osdc->req_mempool, gfp_flags); 350 169 memset(req, 0, sizeof(*req)); 351 170 } else { 352 - req = kzalloc(sizeof(*req), gfp_flags); 171 + req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags); 353 172 } 354 173 if (req == NULL) 355 174 return NULL; 356 175 357 176 req->r_osdc = osdc; 358 177 req->r_mempool = use_mempool; 178 + req->r_num_ops = num_ops; 359 179 360 180 kref_init(&req->r_kref); 361 181 init_completion(&req->r_completion); ··· 380 198 } 381 199 req->r_reply = msg; 382 200 383 - ceph_pagelist_init(&req->r_trail); 384 - 385 201 /* create request message; allow space for oid */ 386 202 if (use_mempool) 387 203 msg = ceph_msgpool_get(&osdc->msgpool_op, 0); ··· 398 218 } 399 219 EXPORT_SYMBOL(ceph_osdc_alloc_request); 400 220 401 - static void osd_req_encode_op(struct 
ceph_osd_request *req, 402 - struct ceph_osd_op *dst, 403 - struct ceph_osd_req_op *src) 221 + static bool osd_req_opcode_valid(u16 opcode) 404 222 { 405 - dst->op = cpu_to_le16(src->op); 406 - 407 - switch (src->op) { 408 - case CEPH_OSD_OP_STAT: 409 - break; 223 + switch (opcode) { 410 224 case CEPH_OSD_OP_READ: 411 - case CEPH_OSD_OP_WRITE: 412 - dst->extent.offset = 413 - cpu_to_le64(src->extent.offset); 414 - dst->extent.length = 415 - cpu_to_le64(src->extent.length); 416 - dst->extent.truncate_size = 417 - cpu_to_le64(src->extent.truncate_size); 418 - dst->extent.truncate_seq = 419 - cpu_to_le32(src->extent.truncate_seq); 420 - break; 421 - case CEPH_OSD_OP_CALL: 422 - dst->cls.class_len = src->cls.class_len; 423 - dst->cls.method_len = src->cls.method_len; 424 - dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); 425 - 426 - ceph_pagelist_append(&req->r_trail, src->cls.class_name, 427 - src->cls.class_len); 428 - ceph_pagelist_append(&req->r_trail, src->cls.method_name, 429 - src->cls.method_len); 430 - ceph_pagelist_append(&req->r_trail, src->cls.indata, 431 - src->cls.indata_len); 432 - break; 433 - case CEPH_OSD_OP_STARTSYNC: 434 - break; 435 - case CEPH_OSD_OP_NOTIFY_ACK: 436 - case CEPH_OSD_OP_WATCH: 437 - dst->watch.cookie = cpu_to_le64(src->watch.cookie); 438 - dst->watch.ver = cpu_to_le64(src->watch.ver); 439 - dst->watch.flag = src->watch.flag; 440 - break; 441 - default: 442 - pr_err("unrecognized osd opcode %d\n", dst->op); 443 - WARN_ON(1); 444 - break; 225 + case CEPH_OSD_OP_STAT: 445 226 case CEPH_OSD_OP_MAPEXT: 446 227 case CEPH_OSD_OP_MASKTRUNC: 447 228 case CEPH_OSD_OP_SPARSE_READ: 448 229 case CEPH_OSD_OP_NOTIFY: 230 + case CEPH_OSD_OP_NOTIFY_ACK: 449 231 case CEPH_OSD_OP_ASSERT_VER: 232 + case CEPH_OSD_OP_WRITE: 450 233 case CEPH_OSD_OP_WRITEFULL: 451 234 case CEPH_OSD_OP_TRUNCATE: 452 235 case CEPH_OSD_OP_ZERO: 453 236 case CEPH_OSD_OP_DELETE: 454 237 case CEPH_OSD_OP_APPEND: 238 + case CEPH_OSD_OP_STARTSYNC: 455 239 case 
CEPH_OSD_OP_SETTRUNC: 456 240 case CEPH_OSD_OP_TRIMTRUNC: 457 241 case CEPH_OSD_OP_TMAPUP: ··· 423 279 case CEPH_OSD_OP_TMAPGET: 424 280 case CEPH_OSD_OP_CREATE: 425 281 case CEPH_OSD_OP_ROLLBACK: 282 + case CEPH_OSD_OP_WATCH: 426 283 case CEPH_OSD_OP_OMAPGETKEYS: 427 284 case CEPH_OSD_OP_OMAPGETVALS: 428 285 case CEPH_OSD_OP_OMAPGETHEADER: 429 286 case CEPH_OSD_OP_OMAPGETVALSBYKEYS: 430 - case CEPH_OSD_OP_MODE_RD: 431 287 case CEPH_OSD_OP_OMAPSETVALS: 432 288 case CEPH_OSD_OP_OMAPSETHEADER: 433 289 case CEPH_OSD_OP_OMAPCLEAR: ··· 458 314 case CEPH_OSD_OP_RDUNLOCK: 459 315 case CEPH_OSD_OP_UPLOCK: 460 316 case CEPH_OSD_OP_DNLOCK: 317 + case CEPH_OSD_OP_CALL: 461 318 case CEPH_OSD_OP_PGLS: 462 319 case CEPH_OSD_OP_PGLS_FILTER: 463 - pr_err("unsupported osd opcode %s\n", 464 - ceph_osd_op_name(dst->op)); 465 - WARN_ON(1); 466 - break; 320 + return true; 321 + default: 322 + return false; 467 323 } 468 - dst->payload_len = cpu_to_le32(src->payload_len); 469 324 } 470 325 471 326 /* 472 - * build new request AND message 473 - * 327 + * This is an osd op init function for opcodes that have no data or 328 + * other information associated with them. It also serves as a 329 + * common init routine for all the other init functions, below. 
474 330 */ 475 - void ceph_osdc_build_request(struct ceph_osd_request *req, 476 - u64 off, u64 len, unsigned int num_ops, 477 - struct ceph_osd_req_op *src_ops, 478 - struct ceph_snap_context *snapc, u64 snap_id, 479 - struct timespec *mtime) 331 + static struct ceph_osd_req_op * 332 + _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, 333 + u16 opcode) 480 334 { 481 - struct ceph_msg *msg = req->r_request; 482 - struct ceph_osd_req_op *src_op; 483 - void *p; 484 - size_t msg_size; 485 - int flags = req->r_flags; 486 - u64 data_len; 487 - int i; 335 + struct ceph_osd_req_op *op; 488 336 489 - req->r_num_ops = num_ops; 490 - req->r_snapid = snap_id; 491 - req->r_snapc = ceph_get_snap_context(snapc); 337 + BUG_ON(which >= osd_req->r_num_ops); 338 + BUG_ON(!osd_req_opcode_valid(opcode)); 492 339 493 - /* encode request */ 494 - msg->hdr.version = cpu_to_le16(4); 340 + op = &osd_req->r_ops[which]; 341 + memset(op, 0, sizeof (*op)); 342 + op->op = opcode; 495 343 496 - p = msg->front.iov_base; 497 - ceph_encode_32(&p, 1); /* client_inc is always 1 */ 498 - req->r_request_osdmap_epoch = p; 499 - p += 4; 500 - req->r_request_flags = p; 501 - p += 4; 502 - if (req->r_flags & CEPH_OSD_FLAG_WRITE) 503 - ceph_encode_timespec(p, mtime); 504 - p += sizeof(struct ceph_timespec); 505 - req->r_request_reassert_version = p; 506 - p += sizeof(struct ceph_eversion); /* will get filled in */ 507 - 508 - /* oloc */ 509 - ceph_encode_8(&p, 4); 510 - ceph_encode_8(&p, 4); 511 - ceph_encode_32(&p, 8 + 4 + 4); 512 - req->r_request_pool = p; 513 - p += 8; 514 - ceph_encode_32(&p, -1); /* preferred */ 515 - ceph_encode_32(&p, 0); /* key len */ 516 - 517 - ceph_encode_8(&p, 1); 518 - req->r_request_pgid = p; 519 - p += 8 + 4; 520 - ceph_encode_32(&p, -1); /* preferred */ 521 - 522 - /* oid */ 523 - ceph_encode_32(&p, req->r_oid_len); 524 - memcpy(p, req->r_oid, req->r_oid_len); 525 - dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len); 526 - p += 
req->r_oid_len; 527 - 528 - /* ops */ 529 - ceph_encode_16(&p, num_ops); 530 - src_op = src_ops; 531 - req->r_request_ops = p; 532 - for (i = 0; i < num_ops; i++, src_op++) { 533 - osd_req_encode_op(req, p, src_op); 534 - p += sizeof(struct ceph_osd_op); 535 - } 536 - 537 - /* snaps */ 538 - ceph_encode_64(&p, req->r_snapid); 539 - ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); 540 - ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); 541 - if (req->r_snapc) { 542 - for (i = 0; i < snapc->num_snaps; i++) { 543 - ceph_encode_64(&p, req->r_snapc->snaps[i]); 544 - } 545 - } 546 - 547 - req->r_request_attempts = p; 548 - p += 4; 549 - 550 - data_len = req->r_trail.length; 551 - if (flags & CEPH_OSD_FLAG_WRITE) { 552 - req->r_request->hdr.data_off = cpu_to_le16(off); 553 - data_len += len; 554 - } 555 - req->r_request->hdr.data_len = cpu_to_le32(data_len); 556 - req->r_request->page_alignment = req->r_page_alignment; 557 - 558 - BUG_ON(p > msg->front.iov_base + msg->front.iov_len); 559 - msg_size = p - msg->front.iov_base; 560 - msg->front.iov_len = msg_size; 561 - msg->hdr.front_len = cpu_to_le32(msg_size); 562 - 563 - dout("build_request msg_size was %d num_ops %d\n", (int)msg_size, 564 - num_ops); 565 - return; 344 + return op; 566 345 } 567 - EXPORT_SYMBOL(ceph_osdc_build_request); 346 + 347 + void osd_req_op_init(struct ceph_osd_request *osd_req, 348 + unsigned int which, u16 opcode) 349 + { 350 + (void)_osd_req_op_init(osd_req, which, opcode); 351 + } 352 + EXPORT_SYMBOL(osd_req_op_init); 353 + 354 + void osd_req_op_extent_init(struct ceph_osd_request *osd_req, 355 + unsigned int which, u16 opcode, 356 + u64 offset, u64 length, 357 + u64 truncate_size, u32 truncate_seq) 358 + { 359 + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); 360 + size_t payload_len = 0; 361 + 362 + BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); 363 + 364 + op->extent.offset = offset; 365 + op->extent.length = length; 366 + 
op->extent.truncate_size = truncate_size; 367 + op->extent.truncate_seq = truncate_seq; 368 + if (opcode == CEPH_OSD_OP_WRITE) 369 + payload_len += length; 370 + 371 + op->payload_len = payload_len; 372 + } 373 + EXPORT_SYMBOL(osd_req_op_extent_init); 374 + 375 + void osd_req_op_extent_update(struct ceph_osd_request *osd_req, 376 + unsigned int which, u64 length) 377 + { 378 + struct ceph_osd_req_op *op; 379 + u64 previous; 380 + 381 + BUG_ON(which >= osd_req->r_num_ops); 382 + op = &osd_req->r_ops[which]; 383 + previous = op->extent.length; 384 + 385 + if (length == previous) 386 + return; /* Nothing to do */ 387 + BUG_ON(length > previous); 388 + 389 + op->extent.length = length; 390 + op->payload_len -= previous - length; 391 + } 392 + EXPORT_SYMBOL(osd_req_op_extent_update); 393 + 394 + void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, 395 + u16 opcode, const char *class, const char *method) 396 + { 397 + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); 398 + struct ceph_pagelist *pagelist; 399 + size_t payload_len = 0; 400 + size_t size; 401 + 402 + BUG_ON(opcode != CEPH_OSD_OP_CALL); 403 + 404 + pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); 405 + BUG_ON(!pagelist); 406 + ceph_pagelist_init(pagelist); 407 + 408 + op->cls.class_name = class; 409 + size = strlen(class); 410 + BUG_ON(size > (size_t) U8_MAX); 411 + op->cls.class_len = size; 412 + ceph_pagelist_append(pagelist, class, size); 413 + payload_len += size; 414 + 415 + op->cls.method_name = method; 416 + size = strlen(method); 417 + BUG_ON(size > (size_t) U8_MAX); 418 + op->cls.method_len = size; 419 + ceph_pagelist_append(pagelist, method, size); 420 + payload_len += size; 421 + 422 + osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); 423 + 424 + op->cls.argc = 0; /* currently unused */ 425 + 426 + op->payload_len = payload_len; 427 + } 428 + EXPORT_SYMBOL(osd_req_op_cls_init); 429 + 430 + void osd_req_op_watch_init(struct 
ceph_osd_request *osd_req, 431 + unsigned int which, u16 opcode, 432 + u64 cookie, u64 version, int flag) 433 + { 434 + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); 435 + 436 + BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); 437 + 438 + op->watch.cookie = cookie; 439 + op->watch.ver = version; 440 + if (opcode == CEPH_OSD_OP_WATCH && flag) 441 + op->watch.flag = (u8)1; 442 + } 443 + EXPORT_SYMBOL(osd_req_op_watch_init); 444 + 445 + static void ceph_osdc_msg_data_add(struct ceph_msg *msg, 446 + struct ceph_osd_data *osd_data) 447 + { 448 + u64 length = ceph_osd_data_length(osd_data); 449 + 450 + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 451 + BUG_ON(length > (u64) SIZE_MAX); 452 + if (length) 453 + ceph_msg_data_add_pages(msg, osd_data->pages, 454 + length, osd_data->alignment); 455 + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { 456 + BUG_ON(!length); 457 + ceph_msg_data_add_pagelist(msg, osd_data->pagelist); 458 + #ifdef CONFIG_BLOCK 459 + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { 460 + ceph_msg_data_add_bio(msg, osd_data->bio, length); 461 + #endif 462 + } else { 463 + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); 464 + } 465 + } 466 + 467 + static u64 osd_req_encode_op(struct ceph_osd_request *req, 468 + struct ceph_osd_op *dst, unsigned int which) 469 + { 470 + struct ceph_osd_req_op *src; 471 + struct ceph_osd_data *osd_data; 472 + u64 request_data_len = 0; 473 + u64 data_length; 474 + 475 + BUG_ON(which >= req->r_num_ops); 476 + src = &req->r_ops[which]; 477 + if (WARN_ON(!osd_req_opcode_valid(src->op))) { 478 + pr_err("unrecognized osd opcode %d\n", src->op); 479 + 480 + return 0; 481 + } 482 + 483 + switch (src->op) { 484 + case CEPH_OSD_OP_STAT: 485 + osd_data = &src->raw_data_in; 486 + ceph_osdc_msg_data_add(req->r_reply, osd_data); 487 + break; 488 + case CEPH_OSD_OP_READ: 489 + case CEPH_OSD_OP_WRITE: 490 + if (src->op == CEPH_OSD_OP_WRITE) 491 + request_data_len 
= src->extent.length; 492 + dst->extent.offset = cpu_to_le64(src->extent.offset); 493 + dst->extent.length = cpu_to_le64(src->extent.length); 494 + dst->extent.truncate_size = 495 + cpu_to_le64(src->extent.truncate_size); 496 + dst->extent.truncate_seq = 497 + cpu_to_le32(src->extent.truncate_seq); 498 + osd_data = &src->extent.osd_data; 499 + if (src->op == CEPH_OSD_OP_WRITE) 500 + ceph_osdc_msg_data_add(req->r_request, osd_data); 501 + else 502 + ceph_osdc_msg_data_add(req->r_reply, osd_data); 503 + break; 504 + case CEPH_OSD_OP_CALL: 505 + dst->cls.class_len = src->cls.class_len; 506 + dst->cls.method_len = src->cls.method_len; 507 + osd_data = &src->cls.request_info; 508 + ceph_osdc_msg_data_add(req->r_request, osd_data); 509 + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST); 510 + request_data_len = osd_data->pagelist->length; 511 + 512 + osd_data = &src->cls.request_data; 513 + data_length = ceph_osd_data_length(osd_data); 514 + if (data_length) { 515 + BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE); 516 + dst->cls.indata_len = cpu_to_le32(data_length); 517 + ceph_osdc_msg_data_add(req->r_request, osd_data); 518 + src->payload_len += data_length; 519 + request_data_len += data_length; 520 + } 521 + osd_data = &src->cls.response_data; 522 + ceph_osdc_msg_data_add(req->r_reply, osd_data); 523 + break; 524 + case CEPH_OSD_OP_STARTSYNC: 525 + break; 526 + case CEPH_OSD_OP_NOTIFY_ACK: 527 + case CEPH_OSD_OP_WATCH: 528 + dst->watch.cookie = cpu_to_le64(src->watch.cookie); 529 + dst->watch.ver = cpu_to_le64(src->watch.ver); 530 + dst->watch.flag = src->watch.flag; 531 + break; 532 + default: 533 + pr_err("unsupported osd opcode %s\n", 534 + ceph_osd_op_name(src->op)); 535 + WARN_ON(1); 536 + 537 + return 0; 538 + } 539 + dst->op = cpu_to_le16(src->op); 540 + dst->payload_len = cpu_to_le32(src->payload_len); 541 + 542 + return request_data_len; 543 + } 568 544 569 545 /* 570 546 * build new request AND message, calculate layout, and adjust file ··· 700 436 
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, 701 437 struct ceph_file_layout *layout, 702 438 struct ceph_vino vino, 703 - u64 off, u64 *plen, 439 + u64 off, u64 *plen, int num_ops, 704 440 int opcode, int flags, 705 441 struct ceph_snap_context *snapc, 706 - int do_sync, 707 442 u32 truncate_seq, 708 443 u64 truncate_size, 709 - struct timespec *mtime, 710 - bool use_mempool, 711 - int page_align) 444 + bool use_mempool) 712 445 { 713 - struct ceph_osd_req_op ops[2]; 714 446 struct ceph_osd_request *req; 715 - unsigned int num_op = 1; 447 + u64 objnum = 0; 448 + u64 objoff = 0; 449 + u64 objlen = 0; 450 + u32 object_size; 451 + u64 object_base; 716 452 int r; 717 453 718 - memset(&ops, 0, sizeof ops); 454 + BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); 719 455 720 - ops[0].op = opcode; 721 - ops[0].extent.truncate_seq = truncate_seq; 722 - ops[0].extent.truncate_size = truncate_size; 723 - 724 - if (do_sync) { 725 - ops[1].op = CEPH_OSD_OP_STARTSYNC; 726 - num_op++; 727 - } 728 - 729 - req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool, 456 + req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, 730 457 GFP_NOFS); 731 458 if (!req) 732 459 return ERR_PTR(-ENOMEM); 460 + 733 461 req->r_flags = flags; 734 462 735 463 /* calculate max write size */ 736 - r = calc_layout(vino, layout, off, plen, req, ops); 737 - if (r < 0) 464 + r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); 465 + if (r < 0) { 466 + ceph_osdc_put_request(req); 738 467 return ERR_PTR(r); 468 + } 469 + 470 + object_size = le32_to_cpu(layout->fl_object_size); 471 + object_base = off - objoff; 472 + if (truncate_size <= object_base) { 473 + truncate_size = 0; 474 + } else { 475 + truncate_size -= object_base; 476 + if (truncate_size > object_size) 477 + truncate_size = object_size; 478 + } 479 + 480 + osd_req_op_extent_init(req, 0, opcode, objoff, objlen, 481 + truncate_size, truncate_seq); 482 + 483 + /* 484 + * A 
second op in the ops array means the caller wants to 485 + * also issue a include a 'startsync' command so that the 486 + * osd will flush data quickly. 487 + */ 488 + if (num_ops > 1) 489 + osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); 490 + 739 491 req->r_file_layout = *layout; /* keep a copy */ 740 492 741 - /* in case it differs from natural (file) alignment that 742 - calc_layout filled in for us */ 743 - req->r_num_pages = calc_pages_for(page_align, *plen); 744 - req->r_page_alignment = page_align; 745 - 746 - ceph_osdc_build_request(req, off, *plen, num_op, ops, 747 - snapc, vino.snap, mtime); 493 + snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", 494 + vino.ino, objnum); 495 + req->r_oid_len = strlen(req->r_oid); 748 496 749 497 return req; 750 498 } ··· 834 558 struct ceph_osd *osd) 835 559 { 836 560 struct ceph_osd_request *req, *nreq; 561 + LIST_HEAD(resend); 837 562 int err; 838 563 839 564 dout("__kick_osd_requests osd%d\n", osd->o_osd); 840 565 err = __reset_osd(osdc, osd); 841 566 if (err) 842 567 return; 843 - 568 + /* 569 + * Build up a list of requests to resend by traversing the 570 + * osd's list of requests. Requests for a given object are 571 + * sent in tid order, and that is also the order they're 572 + * kept on this list. Therefore all requests that are in 573 + * flight will be found first, followed by all requests that 574 + * have not yet been sent. And to resend requests while 575 + * preserving this order we will want to put any sent 576 + * requests back on the front of the osd client's unsent 577 + * list. 578 + * 579 + * So we build a separate ordered list of already-sent 580 + * requests for the affected osd and splice it onto the 581 + * front of the osd client's unsent list. Once we've seen a 582 + * request that has not yet been sent we're done. Those 583 + * requests are already sitting right where they belong. 
584 + */ 844 585 list_for_each_entry(req, &osd->o_requests, r_osd_item) { 845 - list_move(&req->r_req_lru_item, &osdc->req_unsent); 846 - dout("requeued %p tid %llu osd%d\n", req, req->r_tid, 586 + if (!req->r_sent) 587 + break; 588 + list_move_tail(&req->r_req_lru_item, &resend); 589 + dout("requeueing %p tid %llu osd%d\n", req, req->r_tid, 847 590 osd->o_osd); 848 591 if (!req->r_linger) 849 592 req->r_flags |= CEPH_OSD_FLAG_RETRY; 850 593 } 594 + list_splice(&resend, &osdc->req_unsent); 851 595 596 + /* 597 + * Linger requests are re-registered before sending, which 598 + * sets up a new tid for each. We add them to the unsent 599 + * list at the end to keep things in tid order. 600 + */ 852 601 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, 853 602 r_linger_osd) { 854 603 /* ··· 882 581 */ 883 582 BUG_ON(!list_empty(&req->r_req_lru_item)); 884 583 __register_request(osdc, req); 885 - list_add(&req->r_req_lru_item, &osdc->req_unsent); 886 - list_add(&req->r_osd_item, &req->r_osd->o_requests); 584 + list_add_tail(&req->r_req_lru_item, &osdc->req_unsent); 585 + list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); 887 586 __unregister_linger_request(osdc, req); 888 587 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, 889 588 osd->o_osd); ··· 955 654 if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { 956 655 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; 957 656 958 - if (ac->ops && ac->ops->destroy_authorizer) 959 - ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer); 657 + ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); 960 658 kfree(osd); 961 659 } 962 660 } ··· 1120 820 } 1121 821 } 1122 822 1123 - static void register_request(struct ceph_osd_client *osdc, 1124 - struct ceph_osd_request *req) 1125 - { 1126 - mutex_lock(&osdc->request_mutex); 1127 - __register_request(osdc, req); 1128 - mutex_unlock(&osdc->request_mutex); 1129 - } 1130 - 1131 823 /* 1132 824 * called under 
osdc->request_mutex 1133 825 */ ··· 1244 952 int err; 1245 953 1246 954 dout("map_request %p tid %lld\n", req, req->r_tid); 1247 - err = ceph_calc_object_layout(&pgid, req->r_oid, 1248 - &req->r_file_layout, osdc->osdmap); 955 + err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap, 956 + ceph_file_layout_pg_pool(req->r_file_layout)); 1249 957 if (err) { 1250 958 list_move(&req->r_req_lru_item, &osdc->req_notarget); 1251 959 return err; ··· 1299 1007 1300 1008 if (req->r_osd) { 1301 1009 __remove_osd_from_lru(req->r_osd); 1302 - list_add(&req->r_osd_item, &req->r_osd->o_requests); 1303 - list_move(&req->r_req_lru_item, &osdc->req_unsent); 1010 + list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); 1011 + list_move_tail(&req->r_req_lru_item, &osdc->req_unsent); 1304 1012 } else { 1305 - list_move(&req->r_req_lru_item, &osdc->req_notarget); 1013 + list_move_tail(&req->r_req_lru_item, &osdc->req_notarget); 1306 1014 } 1307 1015 err = 1; /* osd or pg changed */ 1308 1016 ··· 1337 1045 list_move_tail(&req->r_req_lru_item, &osdc->req_lru); 1338 1046 1339 1047 ceph_msg_get(req->r_request); /* send consumes a ref */ 1340 - ceph_con_send(&req->r_osd->o_con, req->r_request); 1048 + 1049 + /* Mark the request unsafe if this is the first timet's being sent. 
*/ 1050 + 1051 + if (!req->r_sent && req->r_unsafe_callback) 1052 + req->r_unsafe_callback(req, true); 1341 1053 req->r_sent = req->r_osd->o_incarnation; 1054 + 1055 + ceph_con_send(&req->r_osd->o_con, req->r_request); 1342 1056 } 1343 1057 1344 1058 /* ··· 1432 1134 1433 1135 static void complete_request(struct ceph_osd_request *req) 1434 1136 { 1435 - if (req->r_safe_callback) 1436 - req->r_safe_callback(req, NULL); 1137 + if (req->r_unsafe_callback) 1138 + req->r_unsafe_callback(req, false); 1437 1139 complete_all(&req->r_safe_completion); /* fsync waiter */ 1438 - } 1439 - 1440 - static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid) 1441 - { 1442 - __u8 v; 1443 - 1444 - ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad); 1445 - v = ceph_decode_8(p); 1446 - if (v > 1) { 1447 - pr_warning("do not understand pg encoding %d > 1", v); 1448 - return -EINVAL; 1449 - } 1450 - pgid->pool = ceph_decode_64(p); 1451 - pgid->seed = ceph_decode_32(p); 1452 - *p += 4; 1453 - return 0; 1454 - 1455 - bad: 1456 - pr_warning("incomplete pg encoding"); 1457 - return -EINVAL; 1458 1140 } 1459 1141 1460 1142 /* ··· 1448 1170 struct ceph_osd_request *req; 1449 1171 u64 tid; 1450 1172 int object_len; 1451 - int numops, payload_len, flags; 1173 + unsigned int numops; 1174 + int payload_len, flags; 1452 1175 s32 result; 1453 1176 s32 retry_attempt; 1454 1177 struct ceph_pg pg; ··· 1457 1178 u32 reassert_epoch; 1458 1179 u64 reassert_version; 1459 1180 u32 osdmap_epoch; 1460 - int i; 1181 + int already_completed; 1182 + u32 bytes; 1183 + unsigned int i; 1461 1184 1462 1185 tid = le64_to_cpu(msg->hdr.tid); 1463 1186 dout("handle_reply %p tid %llu\n", msg, tid); ··· 1472 1191 ceph_decode_need(&p, end, object_len, bad); 1473 1192 p += object_len; 1474 1193 1475 - err = __decode_pgid(&p, end, &pg); 1194 + err = ceph_decode_pgid(&p, end, &pg); 1476 1195 if (err) 1477 1196 goto bad; 1478 1197 ··· 1488 1207 req = __lookup_request(osdc, tid); 1489 1208 if (req == NULL) { 1490 1209 
dout("handle_reply tid %llu dne\n", tid); 1491 - mutex_unlock(&osdc->request_mutex); 1492 - return; 1210 + goto bad_mutex; 1493 1211 } 1494 1212 ceph_osdc_get_request(req); 1495 1213 ··· 1513 1233 payload_len += len; 1514 1234 p += sizeof(*op); 1515 1235 } 1516 - if (payload_len != le32_to_cpu(msg->hdr.data_len)) { 1236 + bytes = le32_to_cpu(msg->hdr.data_len); 1237 + if (payload_len != bytes) { 1517 1238 pr_warning("sum of op payload lens %d != data_len %d", 1518 - payload_len, le32_to_cpu(msg->hdr.data_len)); 1239 + payload_len, bytes); 1519 1240 goto bad_put; 1520 1241 } 1521 1242 ··· 1525 1244 for (i = 0; i < numops; i++) 1526 1245 req->r_reply_op_result[i] = ceph_decode_32(&p); 1527 1246 1528 - /* 1529 - * if this connection filled our message, drop our reference now, to 1530 - * avoid a (safe but slower) revoke later. 1531 - */ 1532 - if (req->r_con_filling_msg == con && req->r_reply == msg) { 1533 - dout(" dropping con_filling_msg ref %p\n", con); 1534 - req->r_con_filling_msg = NULL; 1535 - con->ops->put(con); 1536 - } 1537 - 1538 1247 if (!req->r_got_reply) { 1539 - unsigned int bytes; 1540 1248 1541 1249 req->r_result = result; 1542 - bytes = le32_to_cpu(msg->hdr.data_len); 1543 1250 dout("handle_reply result %d bytes %d\n", req->r_result, 1544 1251 bytes); 1545 1252 if (req->r_result == 0) ··· 1555 1286 ((flags & CEPH_OSD_FLAG_WRITE) == 0)) 1556 1287 __unregister_request(osdc, req); 1557 1288 1289 + already_completed = req->r_completed; 1290 + req->r_completed = 1; 1558 1291 mutex_unlock(&osdc->request_mutex); 1292 + if (already_completed) 1293 + goto done; 1559 1294 1560 1295 if (req->r_callback) 1561 1296 req->r_callback(req, msg); ··· 1576 1303 1577 1304 bad_put: 1578 1305 ceph_osdc_put_request(req); 1306 + bad_mutex: 1307 + mutex_unlock(&osdc->request_mutex); 1579 1308 bad: 1580 1309 pr_err("corrupt osd_op_reply got %d %d\n", 1581 1310 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); ··· 2011 1736 } 2012 1737 2013 1738 /* 1739 + * build new 
request AND message 1740 + * 1741 + */ 1742 + void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, 1743 + struct ceph_snap_context *snapc, u64 snap_id, 1744 + struct timespec *mtime) 1745 + { 1746 + struct ceph_msg *msg = req->r_request; 1747 + void *p; 1748 + size_t msg_size; 1749 + int flags = req->r_flags; 1750 + u64 data_len; 1751 + unsigned int i; 1752 + 1753 + req->r_snapid = snap_id; 1754 + req->r_snapc = ceph_get_snap_context(snapc); 1755 + 1756 + /* encode request */ 1757 + msg->hdr.version = cpu_to_le16(4); 1758 + 1759 + p = msg->front.iov_base; 1760 + ceph_encode_32(&p, 1); /* client_inc is always 1 */ 1761 + req->r_request_osdmap_epoch = p; 1762 + p += 4; 1763 + req->r_request_flags = p; 1764 + p += 4; 1765 + if (req->r_flags & CEPH_OSD_FLAG_WRITE) 1766 + ceph_encode_timespec(p, mtime); 1767 + p += sizeof(struct ceph_timespec); 1768 + req->r_request_reassert_version = p; 1769 + p += sizeof(struct ceph_eversion); /* will get filled in */ 1770 + 1771 + /* oloc */ 1772 + ceph_encode_8(&p, 4); 1773 + ceph_encode_8(&p, 4); 1774 + ceph_encode_32(&p, 8 + 4 + 4); 1775 + req->r_request_pool = p; 1776 + p += 8; 1777 + ceph_encode_32(&p, -1); /* preferred */ 1778 + ceph_encode_32(&p, 0); /* key len */ 1779 + 1780 + ceph_encode_8(&p, 1); 1781 + req->r_request_pgid = p; 1782 + p += 8 + 4; 1783 + ceph_encode_32(&p, -1); /* preferred */ 1784 + 1785 + /* oid */ 1786 + ceph_encode_32(&p, req->r_oid_len); 1787 + memcpy(p, req->r_oid, req->r_oid_len); 1788 + dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len); 1789 + p += req->r_oid_len; 1790 + 1791 + /* ops--can imply data */ 1792 + ceph_encode_16(&p, (u16)req->r_num_ops); 1793 + data_len = 0; 1794 + for (i = 0; i < req->r_num_ops; i++) { 1795 + data_len += osd_req_encode_op(req, p, i); 1796 + p += sizeof(struct ceph_osd_op); 1797 + } 1798 + 1799 + /* snaps */ 1800 + ceph_encode_64(&p, req->r_snapid); 1801 + ceph_encode_64(&p, req->r_snapc ? 
req->r_snapc->seq : 0); 1802 + ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); 1803 + if (req->r_snapc) { 1804 + for (i = 0; i < snapc->num_snaps; i++) { 1805 + ceph_encode_64(&p, req->r_snapc->snaps[i]); 1806 + } 1807 + } 1808 + 1809 + req->r_request_attempts = p; 1810 + p += 4; 1811 + 1812 + /* data */ 1813 + if (flags & CEPH_OSD_FLAG_WRITE) { 1814 + u16 data_off; 1815 + 1816 + /* 1817 + * The header "data_off" is a hint to the receiver 1818 + * allowing it to align received data into its 1819 + * buffers such that there's no need to re-copy 1820 + * it before writing it to disk (direct I/O). 1821 + */ 1822 + data_off = (u16) (off & 0xffff); 1823 + req->r_request->hdr.data_off = cpu_to_le16(data_off); 1824 + } 1825 + req->r_request->hdr.data_len = cpu_to_le32(data_len); 1826 + 1827 + BUG_ON(p > msg->front.iov_base + msg->front.iov_len); 1828 + msg_size = p - msg->front.iov_base; 1829 + msg->front.iov_len = msg_size; 1830 + msg->hdr.front_len = cpu_to_le32(msg_size); 1831 + 1832 + dout("build_request msg_size was %d\n", (int)msg_size); 1833 + } 1834 + EXPORT_SYMBOL(ceph_osdc_build_request); 1835 + 1836 + /* 2014 1837 * Register request, send initial attempt. 2015 1838 */ 2016 1839 int ceph_osdc_start_request(struct ceph_osd_client *osdc, ··· 2117 1744 { 2118 1745 int rc = 0; 2119 1746 2120 - req->r_request->pages = req->r_pages; 2121 - req->r_request->nr_pages = req->r_num_pages; 2122 - #ifdef CONFIG_BLOCK 2123 - req->r_request->bio = req->r_bio; 2124 - #endif 2125 - req->r_request->trail = &req->r_trail; 2126 - 2127 - register_request(osdc, req); 2128 - 2129 1747 down_read(&osdc->map_sem); 2130 1748 mutex_lock(&osdc->request_mutex); 2131 - /* 2132 - * a racing kick_requests() may have sent the message for us 2133 - * while we dropped request_mutex above, so only send now if 2134 - * the request still han't been touched yet. 
2135 - */ 2136 - if (req->r_sent == 0) { 2137 - rc = __map_request(osdc, req, 0); 2138 - if (rc < 0) { 2139 - if (nofail) { 2140 - dout("osdc_start_request failed map, " 2141 - " will retry %lld\n", req->r_tid); 2142 - rc = 0; 2143 - } 2144 - goto out_unlock; 1749 + __register_request(osdc, req); 1750 + WARN_ON(req->r_sent); 1751 + rc = __map_request(osdc, req, 0); 1752 + if (rc < 0) { 1753 + if (nofail) { 1754 + dout("osdc_start_request failed map, " 1755 + " will retry %lld\n", req->r_tid); 1756 + rc = 0; 2145 1757 } 2146 - if (req->r_osd == NULL) { 2147 - dout("send_request %p no up osds in pg\n", req); 2148 - ceph_monc_request_next_osdmap(&osdc->client->monc); 2149 - } else { 2150 - __send_request(osdc, req); 2151 - } 2152 - rc = 0; 1758 + goto out_unlock; 2153 1759 } 2154 - 1760 + if (req->r_osd == NULL) { 1761 + dout("send_request %p no up osds in pg\n", req); 1762 + ceph_monc_request_next_osdmap(&osdc->client->monc); 1763 + } else { 1764 + __send_queued(osdc); 1765 + } 1766 + rc = 0; 2155 1767 out_unlock: 2156 1768 mutex_unlock(&osdc->request_mutex); 2157 1769 up_read(&osdc->map_sem); ··· 2298 1940 2299 1941 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, 2300 1942 vino.snap, off, *plen); 2301 - req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1943 + req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, 2302 1944 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2303 - NULL, 0, truncate_seq, truncate_size, NULL, 2304 - false, page_align); 1945 + NULL, truncate_seq, truncate_size, 1946 + false); 2305 1947 if (IS_ERR(req)) 2306 1948 return PTR_ERR(req); 2307 1949 2308 1950 /* it may be a short read due to an object boundary */ 2309 - req->r_pages = pages; 2310 1951 2311 - dout("readpages final extent is %llu~%llu (%d pages align %d)\n", 2312 - off, *plen, req->r_num_pages, page_align); 1952 + osd_req_op_extent_osd_data_pages(req, 0, 1953 + pages, *plen, page_align, false, false); 1954 + 1955 + dout("readpages final extent is %llu~%llu (%llu 
bytes align %d)\n", 1956 + off, *plen, *plen, page_align); 1957 + 1958 + ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); 2313 1959 2314 1960 rc = ceph_osdc_start_request(osdc, req, false); 2315 1961 if (!rc) ··· 2340 1978 int rc = 0; 2341 1979 int page_align = off & ~PAGE_MASK; 2342 1980 2343 - BUG_ON(vino.snap != CEPH_NOSNAP); 2344 - req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1981 + BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */ 1982 + req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, 2345 1983 CEPH_OSD_OP_WRITE, 2346 1984 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, 2347 - snapc, 0, 2348 - truncate_seq, truncate_size, mtime, 2349 - true, page_align); 1985 + snapc, truncate_seq, truncate_size, 1986 + true); 2350 1987 if (IS_ERR(req)) 2351 1988 return PTR_ERR(req); 2352 1989 2353 1990 /* it may be a short write due to an object boundary */ 2354 - req->r_pages = pages; 2355 - dout("writepages %llu~%llu (%d pages)\n", off, len, 2356 - req->r_num_pages); 1991 + osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, 1992 + false, false); 1993 + dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); 1994 + 1995 + ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime); 2357 1996 2358 1997 rc = ceph_osdc_start_request(osdc, req, true); 2359 1998 if (!rc) ··· 2367 2004 return rc; 2368 2005 } 2369 2006 EXPORT_SYMBOL(ceph_osdc_writepages); 2007 + 2008 + int ceph_osdc_setup(void) 2009 + { 2010 + BUG_ON(ceph_osd_request_cache); 2011 + ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", 2012 + sizeof (struct ceph_osd_request), 2013 + __alignof__(struct ceph_osd_request), 2014 + 0, NULL); 2015 + 2016 + return ceph_osd_request_cache ? 
0 : -ENOMEM; 2017 + } 2018 + EXPORT_SYMBOL(ceph_osdc_setup); 2019 + 2020 + void ceph_osdc_cleanup(void) 2021 + { 2022 + BUG_ON(!ceph_osd_request_cache); 2023 + kmem_cache_destroy(ceph_osd_request_cache); 2024 + ceph_osd_request_cache = NULL; 2025 + } 2026 + EXPORT_SYMBOL(ceph_osdc_cleanup); 2370 2027 2371 2028 /* 2372 2029 * handle incoming message ··· 2447 2064 goto out; 2448 2065 } 2449 2066 2450 - if (req->r_con_filling_msg) { 2067 + if (req->r_reply->con) 2451 2068 dout("%s revoking msg %p from old con %p\n", __func__, 2452 - req->r_reply, req->r_con_filling_msg); 2453 - ceph_msg_revoke_incoming(req->r_reply); 2454 - req->r_con_filling_msg->ops->put(req->r_con_filling_msg); 2455 - req->r_con_filling_msg = NULL; 2456 - } 2069 + req->r_reply, req->r_reply->con); 2070 + ceph_msg_revoke_incoming(req->r_reply); 2457 2071 2458 2072 if (front > req->r_reply->front.iov_len) { 2459 2073 pr_warning("get_reply front %d > preallocated %d\n", ··· 2464 2084 m = ceph_msg_get(req->r_reply); 2465 2085 2466 2086 if (data_len > 0) { 2467 - int want = calc_pages_for(req->r_page_alignment, data_len); 2087 + struct ceph_osd_data *osd_data; 2468 2088 2469 - if (req->r_pages && unlikely(req->r_num_pages < want)) { 2470 - pr_warning("tid %lld reply has %d bytes %d pages, we" 2471 - " had only %d pages ready\n", tid, data_len, 2472 - want, req->r_num_pages); 2473 - *skip = 1; 2474 - ceph_msg_put(m); 2475 - m = NULL; 2476 - goto out; 2089 + /* 2090 + * XXX This is assuming there is only one op containing 2091 + * XXX page data. Probably OK for reads, but this 2092 + * XXX ought to be done more generally. 
2093 + */ 2094 + osd_data = osd_req_op_extent_osd_data(req, 0); 2095 + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 2096 + if (osd_data->pages && 2097 + unlikely(osd_data->length < data_len)) { 2098 + 2099 + pr_warning("tid %lld reply has %d bytes " 2100 + "we had only %llu bytes ready\n", 2101 + tid, data_len, osd_data->length); 2102 + *skip = 1; 2103 + ceph_msg_put(m); 2104 + m = NULL; 2105 + goto out; 2106 + } 2477 2107 } 2478 - m->pages = req->r_pages; 2479 - m->nr_pages = req->r_num_pages; 2480 - m->page_alignment = req->r_page_alignment; 2481 - #ifdef CONFIG_BLOCK 2482 - m->bio = req->r_bio; 2483 - #endif 2484 2108 } 2485 2109 *skip = 0; 2486 - req->r_con_filling_msg = con->ops->get(con); 2487 2110 dout("get_reply tid %lld %p\n", tid, m); 2488 2111 2489 2112 out: ··· 2551 2168 struct ceph_auth_handshake *auth = &o->o_auth; 2552 2169 2553 2170 if (force_new && auth->authorizer) { 2554 - if (ac->ops && ac->ops->destroy_authorizer) 2555 - ac->ops->destroy_authorizer(ac, auth->authorizer); 2171 + ceph_auth_destroy_authorizer(ac, auth->authorizer); 2556 2172 auth->authorizer = NULL; 2557 2173 } 2558 - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { 2559 - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 2560 - auth); 2174 + if (!auth->authorizer) { 2175 + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 2176 + auth); 2177 + if (ret) 2178 + return ERR_PTR(ret); 2179 + } else { 2180 + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 2181 + auth); 2561 2182 if (ret) 2562 2183 return ERR_PTR(ret); 2563 2184 } ··· 2577 2190 struct ceph_osd_client *osdc = o->o_osdc; 2578 2191 struct ceph_auth_client *ac = osdc->client->monc.auth; 2579 2192 2580 - /* 2581 - * XXX If ac->ops or ac->ops->verify_authorizer_reply is null, 2582 - * XXX which do we do: succeed or fail? 
2583 - */ 2584 - return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len); 2193 + return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); 2585 2194 } 2586 2195 2587 2196 static int invalidate_authorizer(struct ceph_connection *con) ··· 2586 2203 struct ceph_osd_client *osdc = o->o_osdc; 2587 2204 struct ceph_auth_client *ac = osdc->client->monc.auth; 2588 2205 2589 - if (ac->ops && ac->ops->invalidate_authorizer) 2590 - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 2591 - 2206 + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 2592 2207 return ceph_monc_validate_auth(&osdc->client->monc); 2593 2208 } 2594 2209
+11 -34
net/ceph/osdmap.c
··· 654 654 return 0; 655 655 } 656 656 657 - static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) 658 - { 659 - u8 v; 660 - 661 - ceph_decode_need(p, end, 1+8+4+4, bad); 662 - v = ceph_decode_8(p); 663 - if (v != 1) 664 - goto bad; 665 - pg->pool = ceph_decode_64(p); 666 - pg->seed = ceph_decode_32(p); 667 - *p += 4; /* skip preferred */ 668 - return 0; 669 - 670 - bad: 671 - dout("error decoding pgid\n"); 672 - return -EINVAL; 673 - } 674 - 675 657 /* 676 658 * decode a full map. 677 659 */ ··· 747 765 struct ceph_pg pgid; 748 766 struct ceph_pg_mapping *pg; 749 767 750 - err = __decode_pgid(p, end, &pgid); 768 + err = ceph_decode_pgid(p, end, &pgid); 751 769 if (err) 752 770 goto bad; 753 771 ceph_decode_need(p, end, sizeof(u32), bad); ··· 965 983 struct ceph_pg pgid; 966 984 u32 pglen; 967 985 968 - err = __decode_pgid(p, end, &pgid); 986 + err = ceph_decode_pgid(p, end, &pgid); 969 987 if (err) 970 988 goto bad; 971 989 ceph_decode_need(p, end, sizeof(u32), bad); ··· 1093 1111 * calculate an object layout (i.e. 
pgid) from an oid, 1094 1112 * file_layout, and osdmap 1095 1113 */ 1096 - int ceph_calc_object_layout(struct ceph_pg *pg, 1097 - const char *oid, 1098 - struct ceph_file_layout *fl, 1099 - struct ceph_osdmap *osdmap) 1114 + int ceph_calc_ceph_pg(struct ceph_pg *pg, const char *oid, 1115 + struct ceph_osdmap *osdmap, uint64_t pool) 1100 1116 { 1101 - unsigned int num, num_mask; 1102 - struct ceph_pg_pool_info *pool; 1117 + struct ceph_pg_pool_info *pool_info; 1103 1118 1104 1119 BUG_ON(!osdmap); 1105 - pg->pool = le32_to_cpu(fl->fl_pg_pool); 1106 - pool = __lookup_pg_pool(&osdmap->pg_pools, pg->pool); 1107 - if (!pool) 1120 + pool_info = __lookup_pg_pool(&osdmap->pg_pools, pool); 1121 + if (!pool_info) 1108 1122 return -EIO; 1109 - pg->seed = ceph_str_hash(pool->object_hash, oid, strlen(oid)); 1110 - num = pool->pg_num; 1111 - num_mask = pool->pg_num_mask; 1123 + pg->pool = pool; 1124 + pg->seed = ceph_str_hash(pool_info->object_hash, oid, strlen(oid)); 1112 1125 1113 - dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pg->pool, pg->seed); 1126 + dout("%s '%s' pgid %lld.%x\n", __func__, oid, pg->pool, pg->seed); 1114 1127 return 0; 1115 1128 } 1116 - EXPORT_SYMBOL(ceph_calc_object_layout); 1129 + EXPORT_SYMBOL(ceph_calc_ceph_pg); 1117 1130 1118 1131 /* 1119 1132 * Calculate raw osd vector for the given pgid. Return pointer to osd
+78
net/ceph/snapshot.c
··· 1 + /* 2 + * snapshot.c Ceph snapshot context utility routines (part of libceph) 3 + * 4 + * Copyright (C) 2013 Inktank Storage, Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * version 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 + * General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 + * 02110-1301, USA. 19 + */ 20 + 21 + #include <stddef.h> 22 + 23 + #include <linux/types.h> 24 + #include <linux/export.h> 25 + #include <linux/ceph/libceph.h> 26 + 27 + /* 28 + * Ceph snapshot contexts are reference counted objects, and the 29 + * returned structure holds a single reference. Acquire additional 30 + * references with ceph_get_snap_context(), and release them with 31 + * ceph_put_snap_context(). When the reference count reaches zero 32 + * the entire structure is freed. 33 + */ 34 + 35 + /* 36 + * Create a new ceph snapshot context large enough to hold the 37 + * indicated number of snapshot ids (which can be 0). Caller has 38 + * to fill in snapc->seq and snapc->snaps[0..snap_count-1]. 39 + * 40 + * Returns a null pointer if an error occurs. 
41 + */ 42 + struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, 43 + gfp_t gfp_flags) 44 + { 45 + struct ceph_snap_context *snapc; 46 + size_t size; 47 + 48 + size = sizeof (struct ceph_snap_context); 49 + size += snap_count * sizeof (snapc->snaps[0]); 50 + snapc = kzalloc(size, gfp_flags); 51 + if (!snapc) 52 + return NULL; 53 + 54 + atomic_set(&snapc->nref, 1); 55 + snapc->num_snaps = snap_count; 56 + 57 + return snapc; 58 + } 59 + EXPORT_SYMBOL(ceph_create_snap_context); 60 + 61 + struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc) 62 + { 63 + if (sc) 64 + atomic_inc(&sc->nref); 65 + return sc; 66 + } 67 + EXPORT_SYMBOL(ceph_get_snap_context); 68 + 69 + void ceph_put_snap_context(struct ceph_snap_context *sc) 70 + { 71 + if (!sc) 72 + return; 73 + if (atomic_dec_and_test(&sc->nref)) { 74 + /*printk(" deleting snap_context %p\n", sc);*/ 75 + kfree(sc); 76 + } 77 + } 78 + EXPORT_SYMBOL(ceph_put_snap_context);