Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client

Pull Ceph fixes from Sage Weil:
"We have a few follow-up fixes for the libceph refactor from Ilya, and
then some cephfs + fscache fixes from Zheng.

The first two FS-Cache patches are acked by David Howells and deemed
trivial enough to go through our tree. The rest fix some issues with
the ceph fscache handling (disable cache for inodes opened for write,
and simplify the revalidation logic accordingly, dropping the
now-unnecessary work queue)"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
ceph: use i_version to check validity of fscache
ceph: improve fscache revalidation
ceph: disable fscache when inode is opened for write
ceph: avoid unnecessary fscache invalidation/revalidation
ceph: call __fscache_uncache_page() if readpages fails
FS-Cache: make check_consistency callback return int
FS-Cache: wake write waiter after invalidating writes
libceph: use %s instead of %pE in dout()s
libceph: put request only if it's done in handle_reply()
libceph: change ceph_osdmap_flag() to take osdc

13 files changed, +138 -178

fs/cachefiles/interface.c (+1 -1)
···
   * check if the backing cache is updated to FS-Cache
   * - called by FS-Cache when evaluates if need to invalidate the cache
   */
- static bool cachefiles_check_consistency(struct fscache_operation *op)
+ static int cachefiles_check_consistency(struct fscache_operation *op)
  {
      struct cachefiles_object *object;
      struct cachefiles_cache *cache;

fs/ceph/addr.c (+3 -3)
···
  for (i = 0; i < num_pages; i++) {
      struct page *page = osd_data->pages[i];

-     if (rc < 0 && rc != -ENOENT)
+     if (rc < 0 && rc != -ENOENT) {
+         ceph_fscache_readpage_cancel(inode, page);
          goto unlock;
+     }
      if (bytes < (int)PAGE_SIZE) {
          /* zero (remainder of) page */
          int s = bytes < 0 ? 0 : bytes;
···
  if (writeback_stat >
      CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
      set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
-
- ceph_readpage_to_fscache(inode, page);

  set_page_writeback(page);
  err = ceph_osdc_writepages(osdc, ceph_vino(inode),

fs/ceph/cache.c (+58 -83)
···
  #include "cache.h"

  struct ceph_aux_inode {
+     u64             version;
      struct timespec mtime;
      loff_t          size;
  };
···
  fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                        &ceph_fscache_fsid_object_def,
                                        fsc, true);
-
- if (fsc->fscache == NULL) {
+ if (!fsc->fscache)
      pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
-     return 0;
- }
-
- fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
- if (fsc->revalidate_wq == NULL)
-     return -ENOMEM;

  return 0;
  }
···
  const struct inode* inode = &ci->vfs_inode;

  memset(&aux, 0, sizeof(aux));
+ aux.version = ci->i_version;
  aux.mtime = inode->i_mtime;
  aux.size = i_size_read(inode);
···
      return FSCACHE_CHECKAUX_OBSOLETE;

  memset(&aux, 0, sizeof(aux));
+ aux.version = ci->i_version;
  aux.mtime = inode->i_mtime;
  aux.size = i_size_read(inode);
···
  .now_uncached = ceph_fscache_inode_now_uncached,
  };

- void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-                                         struct ceph_inode_info* ci)
+ void ceph_fscache_register_inode_cookie(struct inode *inode)
  {
-     struct inode* inode = &ci->vfs_inode;
+     struct ceph_inode_info *ci = ceph_inode(inode);
+     struct ceph_fs_client *fsc = ceph_inode_to_client(inode);

      /* No caching for filesystem */
      if (fsc->fscache == NULL)
          return;

      /* Only cache for regular files that are read only */
-     if ((ci->vfs_inode.i_mode & S_IFREG) == 0)
+     if (!S_ISREG(inode->i_mode))
          return;

-     /* Avoid multiple racing open requests */
-     inode_lock(inode);
-
-     if (ci->fscache)
-         goto done;
-
-     ci->fscache = fscache_acquire_cookie(fsc->fscache,
-                                          &ceph_fscache_inode_object_def,
-                                          ci, true);
-     fscache_check_consistency(ci->fscache);
- done:
+     inode_lock_nested(inode, I_MUTEX_CHILD);
+     if (!ci->fscache) {
+         ci->fscache = fscache_acquire_cookie(fsc->fscache,
+                                              &ceph_fscache_inode_object_def,
+                                              ci, false);
+     }
      inode_unlock(inode);
-
  }
···

  fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
  fscache_relinquish_cookie(cookie, 0);
+ }
+
+ static bool ceph_fscache_can_enable(void *data)
+ {
+     struct inode *inode = data;
+     return !inode_is_open_for_write(inode);
+ }
+
+ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+ {
+     struct ceph_inode_info *ci = ceph_inode(inode);
+
+     if (!fscache_cookie_valid(ci->fscache))
+         return;
+
+     if (inode_is_open_for_write(inode)) {
+         dout("fscache_file_set_cookie %p %p disabling cache\n",
+              inode, filp);
+         fscache_disable_cookie(ci->fscache, false);
+         fscache_uncache_all_inode_pages(ci->fscache, inode);
+     } else {
+         fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
+                               inode);
+         if (fscache_cookie_enabled(ci->fscache)) {
+             dout("fscache_file_set_cookie %p %p enabing cache\n",
+                  inode, filp);
+         }
+     }
  }
···

  static inline bool cache_valid(struct ceph_inode_info *ci)
  {
-     return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
-             (ci->i_fscache_gen == ci->i_rdcache_gen));
+     return ci->i_fscache_gen == ci->i_rdcache_gen;
  }
···

  void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
  {
-     if (fsc->revalidate_wq)
-         destroy_workqueue(fsc->revalidate_wq);
-
      fscache_relinquish_cookie(fsc->fscache, 0);
      fsc->fscache = NULL;
  }

- static void ceph_revalidate_work(struct work_struct *work)
+ /*
+  * caller should hold CEPH_CAP_FILE_{RD,CACHE}
+  */
+ void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
  {
-     int issued;
-     u32 orig_gen;
-     struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-                                               i_revalidate_work);
-     struct inode *inode = &ci->vfs_inode;
-
-     spin_lock(&ci->i_ceph_lock);
-     issued = __ceph_caps_issued(ci, NULL);
-     orig_gen = ci->i_rdcache_gen;
-     spin_unlock(&ci->i_ceph_lock);
-
-     if (!(issued & CEPH_CAP_FILE_CACHE)) {
-         dout("revalidate_work lost cache before validation %p\n",
-              inode);
-         goto out;
-     }
-
-     if (!fscache_check_consistency(ci->fscache))
-         fscache_invalidate(ci->fscache);
-
-     spin_lock(&ci->i_ceph_lock);
-     /* Update the new valid generation (backwards sanity check too) */
-     if (orig_gen > ci->i_fscache_gen) {
-         ci->i_fscache_gen = orig_gen;
-     }
-     spin_unlock(&ci->i_ceph_lock);
-
- out:
-     iput(&ci->vfs_inode);
- }
-
- void ceph_queue_revalidate(struct inode *inode)
- {
-     struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-     struct ceph_inode_info *ci = ceph_inode(inode);
-
-     if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
+     if (cache_valid(ci))
          return;

-     ihold(inode);
-
-     if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq,
-                    &ci->i_revalidate_work)) {
-         dout("ceph_queue_revalidate %p\n", inode);
-     } else {
-         dout("ceph_queue_revalidate %p failed\n)", inode);
-         iput(inode);
+     /* resue i_truncate_mutex. There should be no pending
+      * truncate while the caller holds CEPH_CAP_FILE_RD */
+     mutex_lock(&ci->i_truncate_mutex);
+     if (!cache_valid(ci)) {
+         if (fscache_check_consistency(ci->fscache))
+             fscache_invalidate(ci->fscache);
+         spin_lock(&ci->i_ceph_lock);
+         ci->i_fscache_gen = ci->i_rdcache_gen;
+         spin_unlock(&ci->i_ceph_lock);
      }
- }
-
- void ceph_fscache_inode_init(struct ceph_inode_info *ci)
- {
-     ci->fscache = NULL;
-     /* The first load is verifed cookie open time */
-     ci->i_fscache_gen = 1;
-     INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
+     mutex_unlock(&ci->i_truncate_mutex);
  }
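
Taken together, the two new helpers above give the file-open path a simple policy: acquire the per-inode cookie once, then enable or disable it per open depending on whether any writer is present. A condensed sketch of that flow (illustrative only; the wrapper name below is made up, and the real call site is the fs/ceph/file.c hunk further down):

/* Sketch, not verbatim kernel code: ceph_open_setup_fscache() is a
 * hypothetical wrapper standing in for the open path. */
static void ceph_open_setup_fscache(struct inode *inode, struct file *file)
{
    /* acquire ci->fscache once, under the inode lock (no-op if it exists) */
    ceph_fscache_register_inode_cookie(inode);

    /* per-open decision:
     *  - inode has a writer -> disable the cookie and drop cached pages
     *  - read-only openers  -> (re)enable the cookie, gated by
     *                          ceph_fscache_can_enable()              */
    ceph_fscache_file_set_cookie(inode, file);
}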

fs/ceph/cache.h (+26 -18)
···
  int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
  void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);

- void ceph_fscache_inode_init(struct ceph_inode_info *ci);
- void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-                                         struct ceph_inode_info* ci);
+ void ceph_fscache_register_inode_cookie(struct inode *inode);
  void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
+ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
+ void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);

  int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
  int ceph_readpages_from_fscache(struct inode *inode,
···
                                  unsigned *nr_pages);
  void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
  void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
- void ceph_queue_revalidate(struct inode *inode);

- static inline void ceph_fscache_update_objectsize(struct inode *inode)
+ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
  {
-     struct ceph_inode_info *ci = ceph_inode(inode);
-     fscache_attr_changed(ci->fscache);
+     ci->fscache = NULL;
+     ci->i_fscache_gen = 0;
  }

  static inline void ceph_fscache_invalidate(struct inode *inode)
···
      return fscache_readpages_cancel(ci->fscache, pages);
  }

+ static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
+ {
+     ci->i_fscache_gen = ci->i_rdcache_gen - 1;
+ }
+
  #else

  static inline int ceph_fscache_register(void)
···
  {
  }

- static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc,
-                                                       struct ceph_inode_info* ci)
+ static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+ {
+ }
+
+ static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+ {
+ }
+
+ static inline void ceph_fscache_file_set_cookie(struct inode *inode,
+                                                 struct file *filp)
+ {
+ }
+
+ static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
  {
  }
···
  {
  }

- static inline void ceph_fscache_update_objectsize(struct inode *inode)
- {
- }
-
  static inline void ceph_fscache_invalidate(struct inode *inode)
  {
  }

  static inline void ceph_invalidate_fscache_page(struct inode *inode,
                                                  struct page *page)
- {
- }
-
- static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
  {
  }
···
  {
  }

- static inline void ceph_queue_revalidate(struct inode *inode)
+ static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
  {
  }

fs/ceph/caps.c (+8 -15)
···
          snap_rwsem_locked = true;
      }
      *got = need | (have & want);
+     if ((need & CEPH_CAP_FILE_RD) &&
+         !(*got & CEPH_CAP_FILE_CACHE))
+         ceph_disable_fscache_readpage(ci);
      __take_cap_refs(ci, *got, true);
      ret = 1;
  }
···
      }
      break;
  }
+
+ if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
+     ceph_fscache_revalidate_cookie(ci);

  *got = _got;
  return 0;
···
  bool writeback = false;
  bool queue_trunc = false;
  bool queue_invalidate = false;
- bool queue_revalidate = false;
  bool deleted_inode = false;
  bool fill_inline = false;
···
              ci->i_rdcache_revoking = ci->i_rdcache_gen;
          }
      }
-
-     ceph_fscache_invalidate(inode);
  }

  /* side effects now are allowed */
···
          ceph_forget_all_cached_acls(inode);
      }
  }
-
- /* Do we need to revalidate our fscache cookie. Don't bother on the
-  * first cache cap as we already validate at cookie creation time. */
- if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
-     queue_revalidate = true;

  if (newcaps & CEPH_CAP_ANY_RD) {
      /* ctime/mtime/atime? */
···
  if (fill_inline)
      ceph_fill_inline_data(inode, NULL, inline_data, inline_len);

- if (queue_trunc) {
+ if (queue_trunc)
      ceph_queue_vmtruncate(inode);
-     ceph_queue_revalidate(inode);
- } else if (queue_revalidate)
-     ceph_queue_revalidate(inode);

  if (writeback)
      /*
···
                                    truncate_seq, truncate_size, size);
  spin_unlock(&ci->i_ceph_lock);

- if (queue_trunc) {
+ if (queue_trunc)
      ceph_queue_vmtruncate(inode);
-     ceph_fscache_invalidate(inode);
- }
  }

  /*
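
With the workqueue gone, cache validity is tracked purely by comparing generation counters; the cache.h, cache.c, and caps.c hunks above fit together roughly as follows (an illustrative outline pieced together from this merge, not verbatim kernel code):

/*
 * Outline of the i_fscache_gen protocol after this series (illustrative):
 *
 * 1. In the cap-acquisition path, if CEPH_CAP_FILE_RD is granted without
 *    CEPH_CAP_FILE_CACHE, ceph_disable_fscache_readpage() forces
 *        ci->i_fscache_gen = ci->i_rdcache_gen - 1;
 *    so cache_valid() returns false and the read path bypasses fscache.
 *
 * 2. Once both FILE_RD and FILE_CACHE are held, ceph_get_caps() calls
 *    ceph_fscache_revalidate_cookie() synchronously.  Under
 *    i_truncate_mutex it checks the cookie's auxiliary data (which now
 *    includes i_version), invalidates the cookie if the check reports a
 *    mismatch, and resynchronizes the generations:
 *        ci->i_fscache_gen = ci->i_rdcache_gen;
 *
 * 3. cache_valid() is simply (ci->i_fscache_gen == ci->i_rdcache_gen).
 */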

fs/ceph/file.c (+6 -21)
···
  {
      struct ceph_file_info *cf;
      int ret = 0;
-     struct ceph_inode_info *ci = ceph_inode(inode);
-     struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-     struct ceph_mds_client *mdsc = fsc->mdsc;

      switch (inode->i_mode & S_IFMT) {
      case S_IFREG:
-         /* First file open request creates the cookie, we want to keep
-          * this cookie around for the filetime of the inode as not to
-          * have to worry about fscache register / revoke / operation
-          * races.
-          *
-          * Also, if we know the operation is going to invalidate data
-          * (non readonly) just nuke the cache right away.
-          */
-         ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
-         if ((fmode & CEPH_FILE_MODE_WR))
-             ceph_fscache_invalidate(inode);
+         ceph_fscache_register_inode_cookie(inode);
+         ceph_fscache_file_set_cookie(inode, file);
      case S_IFDIR:
          dout("init_file %p %p 0%o (regular)\n", inode, file,
               inode->i_mode);
···
  }

  retry_snap:
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
      err = -ENOSPC;
      goto out;
  }
···
      iov_iter_advance(from, written);
      ceph_put_snap_context(snapc);
  } else {
-     loff_t old_size = i_size_read(inode);
      /*
       * No need to acquire the i_truncate_mutex. Because
       * the MDS revokes Fwb caps before sending truncate
···
      written = generic_perform_write(file, from, pos);
      if (likely(written >= 0))
          iocb->ki_pos = pos + written;
-     if (i_size_read(inode) > old_size)
-         ceph_fscache_update_objectsize(inode);
      inode_unlock(inode);
  }
···
  ceph_put_cap_refs(ci, got);

  if (written >= 0) {
-     if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
+     if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
          iocb->ki_flags |= IOCB_DSYNC;

      written = generic_write_sync(iocb, written);
···
      goto unlock;
  }

- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
-     !(mode & FALLOC_FL_PUNCH_HOLE)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
+     !(mode & FALLOC_FL_PUNCH_HOLE)) {
      ret = -ENOSPC;
      goto unlock;
  }

fs/ceph/super.h (+1 -3)
···

  #ifdef CONFIG_CEPH_FSCACHE
  struct fscache_cookie *fscache;
- struct workqueue_struct *revalidate_wq;
  #endif
  };
···

  #ifdef CONFIG_CEPH_FSCACHE
  struct fscache_cookie *fscache;
- u32 i_fscache_gen; /* sequence, for delayed fscache validate */
- struct work_struct i_revalidate_work;
+ u32 i_fscache_gen;
  #endif
  struct inode vfs_inode; /* at end */
  };

fs/fscache/page.c (+2)
···
      put_page(results[i]);
  }

+ wake_up_bit(&cookie->flags, 0);
+
  _leave("");
  }

include/linux/ceph/osd_client.h (+5)
···
  struct workqueue_struct *notify_wq;
  };

+ static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
+ {
+     return osdc->osdmap->flags & flag;
+ }
+
  extern int ceph_osdc_setup(void);
  extern void ceph_osdc_cleanup(void);
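
With the helper taking the osd_client itself, callers no longer dereference osdc->osdmap at every call site (see the fs/ceph/file.c hunk above and the net/ceph/osd_client.c hunks below), and the old NULL-map guard is dropped in the next hunk. A minimal usage sketch under the new signature (the wrapper function below is illustrative, not part of this merge):

/* Illustrative only: mirrors the write-path check done in fs/ceph/file.c. */
static int ceph_sketch_reject_if_full(struct ceph_osd_client *osdc)
{
    if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
        return -ENOSPC; /* cluster reports FULL; refuse the write */
    return 0;
}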

include/linux/ceph/osdmap.h (-5)
···
  return !ceph_osd_is_up(map, osd);
  }

- static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
- {
-     return map && (map->flags & flag);
- }
-
  extern char *ceph_osdmap_state_str(char *str, int len, int state);
  extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);

include/linux/fscache-cache.h (+1 -1)
···

  /* check the consistency between the backing cache and the FS-Cache
   * cookie */
- bool (*check_consistency)(struct fscache_operation *op);
+ int (*check_consistency)(struct fscache_operation *op);

  /* store the updated auxiliary data on an object */
  void (*update_object)(struct fscache_object *object);

net/ceph/osd_client.c (+25 -26)
···
                              const struct ceph_osd_request_target *t,
                              struct ceph_pg_pool_info *pi)
  {
-     bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-     bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+     bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+     bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                     __pool_full(pi);

      WARN_ON(pi->id != t->base_oloc.pool);
···
  bool force_resend = false;
  bool need_check_tiering = false;
  bool need_resend = false;
- bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
-                                      CEPH_OSDMAP_SORTBITWISE);
+ bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
  enum calc_target_result ct_res;
  int ret;
···
   */
  msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

- dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
-      req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
-      req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
+ dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+      req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+      msg->front.iov_len, data_len);
  }

  /*
···
  verify_osdc_locked(osdc);
  WARN_ON(!osdc->osdmap->epoch);

- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
-     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
-     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+     ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+     ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
      dout("%s osdc %p continuous\n", __func__, osdc);
      continuous = true;
  } else {
···
  }

  if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
-     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+     ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
      dout("req %p pausewr\n", req);
      req->r_t.paused = true;
      maybe_request_map(osdc);
  } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
-            ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+            ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
      dout("req %p pauserd\n", req);
      req->r_t.paused = true;
      maybe_request_map(osdc);
  } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
             !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
                               CEPH_OSD_FLAG_FULL_FORCE)) &&
-            (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+            (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
              pool_full(osdc, req->r_t.base_oloc.pool))) {
      dout("req %p full/pool_full\n", req);
      pr_warn_ratelimited("FULL or reached pool quota\n");
···
  struct ceph_osd_request *req = lreq->ping_req;
  struct ceph_osd_req_op *op = &req->r_ops[0];

- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
      dout("%s PAUSERD\n", __func__);
      return;
  }
···
          dout("req %p tid %llu cb\n", req, req->r_tid);
          __complete_request(req);
      }
+     if (m.flags & CEPH_OSD_FLAG_ONDISK)
+         complete_all(&req->r_safe_completion);
+     ceph_osdc_put_request(req);
  } else {
      if (req->r_unsafe_callback) {
          dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
···
          WARN_ON(1);
      }
  }
- if (m.flags & CEPH_OSD_FLAG_ONDISK)
-     complete_all(&req->r_safe_completion);

- ceph_osdc_put_request(req);
  return;

  fail_request:
···
  bool skipped_map = false;
  bool was_full;

- was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
  set_pool_was_full(osdc);

  if (incremental)
···
      osdc->osdmap = newmap;
  }

- was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
  scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
                need_resend, need_resend_linger);
···
  if (ceph_check_fsid(osdc->client, &fsid) < 0)
      goto bad;

- was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
- was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-               ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+ was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+               ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                have_pool_full(osdc);

  /* incremental maps */
···
   * we find out when we are no longer full and stop returning
   * ENOSPC.
   */
- pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
- pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+ pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
            have_pool_full(osdc);
  if (was_pauserd || was_pausewr || pauserd || pausewr)
      maybe_request_map(osdc);

net/ceph/osdmap.c (+2 -2)
···
  raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
                                 oid->name_len);

- dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
-      oid->name, raw_pgid->pool, raw_pgid->seed);
+ dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+      raw_pgid->pool, raw_pgid->seed);
  return 0;
  }
  EXPORT_SYMBOL(ceph_object_locator_to_pg);